diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..e289b9b 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,32 +101,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +165,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +182,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +206,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +217,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +227,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +242,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +257,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +269,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +287,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7116fda..d8ed6e8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1084,6 +1084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2080,6 +2084,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2347,6 +2355,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
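The switches documented above (grsec_proc_gid=, pax_softmode=, pax_nouderef and friends) are ordinary kernel boot parameters. As a hedged illustration of how a presence-only flag like pax_nouderef is typically consumed through the standard early_param() hook (names in this sketch are hypothetical, not the patch's actual handlers, which live elsewhere in the patch):

/* Illustrative sketch only: wiring a boot flag via early_param(). */
#include <linux/init.h>

static int nouderef_requested __initdata;

static int __init setup_nouderef(char *str)
{
	nouderef_requested = 1;	/* flag was present on the command line */
	return 0;		/* 0 tells the parser the option was consumed */
}
early_param("pax_nouderef", setup_nouderef);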
diff --git a/Makefile b/Makefile
index 0d499e6..2318683 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -423,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -585,6 +586,72 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -779,7 +846,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -828,6 +895,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -837,7 +906,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -880,10 +949,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -991,6 +1063,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1006,7 +1080,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1072,7 +1146,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1111,7 +1188,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1273,6 +1350,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1412,17 +1491,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1432,11 +1515,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
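All of the -fplugin= machinery above assumes each tools/gcc/*.so exposes the standard gcc plugin entry points. A minimal sketch of that shape (illustrative skeleton only, not one of the actual PaX plugin sources):

/* Skeleton of a gcc plugin as loaded via the -fplugin= flags above. */
#include "gcc-plugin.h"
#include "plugin-version.h"

int plugin_is_GPL_compatible;	/* mandatory, or gcc refuses to load the .so */

int plugin_init(struct plugin_name_args *plugin_info,
		struct plugin_gcc_version *version)
{
	/* reject a plugin built against a different gcc release */
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;
	/* a real plugin registers passes, attributes or callbacks here */
	return 0;
}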
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 78b03ef..da28a51 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
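On alpha the *_unchecked variants are plain aliases because PAX_REFCOUNT overflow checking is not implemented for this architecture; on architectures that do implement it (see the ARM hunks later in this patch), atomic_t traps on signed overflow while the _unchecked types deliberately keep wrapping semantics. A hedged usage sketch (surrounding names hypothetical):

/* Reference counts want the checked type; pure statistics that may
 * legitimately wrap use the _unchecked flavour. */
atomic_t map_refcount;			/* overflow here is an exploit primitive */
atomic64_unchecked_t rx_bytes;		/* wrapping is harmless bookkeeping */

static void account_packet(long len)
{
	atomic_inc(&map_refcount);
	atomic64_add_unchecked(len, &rx_bytes);
}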
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
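The rewrite above derives L1_CACHE_BYTES from L1_CACHE_SHIFT with _AC() so a single definition works in both C and assembly. For reference, _AC() from include/uapi/linux/const.h (quoted from memory, abridged) drops the type suffix when preprocessing .S files, where "1UL" would not assemble:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* plain "1" for the assembler */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* "(1UL)" for C */
#endif

/* so on EV6: L1_CACHE_BYTES = (1UL << 6) = 64, exactly as before */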
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc..0b1abd2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
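The filp/flags arguments threaded through arch_get_unmapped_area_1() feed gr_rand_threadstack_offset(), a helper this patch defines under grsecurity/ whose result vm_unmapped_area() honours through the threadstack_offset field the patch adds to struct vm_unmapped_area_info. A rough sketch of the idea only (hypothetical code, not the real helper):

/* Hypothetical sketch: randomize the gap under MAP_STACK mappings. */
static unsigned long rand_threadstack_offset(unsigned long flags)
{
	if (!(flags & MAP_STACK))
		return 0;
	/* up to 255 pages of extra, randomly sized guard gap */
	return (get_random_int() & 0xffUL) << PAGE_SHIFT;
}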
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a0..b304fb4 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
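Why those magic constants identify an Alpha PLT stub — an illustrative decode using the standard Alpha instruction layout (opcode[31:26] ra[25:21] rb[20:16] disp[15:0]):

/*
 * 0x277B0000 masked with 0xFFFF0000  ->  ldah $27, hi($27)   (opcode 0x09)
 * 0xA77B0000 masked with 0xFFFF0000  ->  ldq  $27, lo($27)   (opcode 0x29)
 * 0x6BFB0000                         ->  jmp  $31, ($27)     (opcode 0x1A)
 *
 * i.e. the canonical "build the target address in $27, jump through it"
 * sequence the linker emits for PLT entries.  The (x ^ 0x8000) + 0x8000
 * arithmetic in the emulator sign-extends the 16-bit displacement fields,
 * so the fault handler can recompute the branch target and transfer
 * control without ever making the page executable.
 */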
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44298ad..29a20c0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1862,7 +1862,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -2125,6 +2125,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb5..09d45e3 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)

 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -221,6 +410,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif

 static inline void atomic64_add(long long i, atomic64_t *v)
@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %Q0, %Q0, %Q4\n"
 " sbc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -344,16 +691,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");

- smp_mb();
-
 return result;
 }
@@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }

+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -406,20 +791,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -442,13 +841,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -461,10 +872,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
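The pattern repeated throughout the hunk above is the core of PAX_REFCOUNT on ARM: the flag-setting adds/adcs variants replace add/adc, bvc skips the trap when no signed overflow occurred, bkpt 0xf103 faults on overflow, and the _ASM_EXTABLE entry steers the exception handler past the strex so the wrapped value is never written back. A portable analogue of the check (sketch only, not code from the patch; __builtin_add_overflow needs a newer compiler than this kernel targets):

/* What adds/bvc/bkpt implements, expressed portably. */
static inline int checked_add(int *counter, int i)
{
	int sum;
	if (__builtin_add_overflow(*counter, i, &sum))
		return -1;	/* would wrap: refuse and report */
	*counter = sum;		/* commit only the non-overflowed value */
	return 0;
}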
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 2f59f74..1594659 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -63,7 +63,7 @@
 do { \
 compiletime_assert_atomic_type(*p); \
 smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ ACCESS_ONCE_RW(*p) = (v); \
 } while (0)

 #define smp_load_acquire(p) \
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 8b8b616..d973d24 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
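__no_const is the opt-out from the constify gcc plugin wired up earlier in the Makefile: the plugin force-qualifies structs consisting of function pointers as const so they land in .rodata, and cpu_cache_fns must stay writable because it is filled in at boot while probing the CPU. For an ops table that really is fixed, constification amounts to this (illustrative only, hypothetical names):

/* What the constify plugin enforces implicitly for eligible structs. */
static const struct cpu_cache_ops fixed_ops = {	/* placed in .rodata */
	.flush_kern_all	= my_flush_kern_all,
	.flush_user_all	= my_flush_user_all,
};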
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 5233151..87a71fa 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba..63fe3e1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
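The domain_val() macro above packs a 2-bit access type per domain into the ARM DACR image, so a full register value is just the OR of one field per domain. A worked sketch (domain indices come from asm/domain.h; the composition here is illustrative):

/* DOMAIN_USER/KERNEL/IO are small integers; each gets 2 bits in DACR. */
unsigned dacr = domain_val(DOMAIN_USER,   DOMAIN_USERCLIENT)   |
		domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
		domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);
set_domain(dacr);	/* written to CP15 c3 by the asm in set_domain() */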
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f4b46d3..abc9b2b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
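Back-of-envelope for the ASLR constants above, assuming 4 KiB pages: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN count bits of page-granular randomization, so the resulting shift span works out as follows.

/* 16 bits of entropy: (1UL << 16) * 4096 = 256 MiB of possible base shift
 * 10 bits of entropy: (1UL << 10) * 4096 =   4 MiB (the non-PER_LINUX_32BIT case) */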
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 2aff798..099eb15 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -45,6 +45,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -60,6 +62,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -90,6 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -100,6 +106,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -122,6 +130,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -143,6 +152,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index f98c7f3..e5c626d 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -23,17 +23,19 @@ struct map_desc {

 /* types 0-3 are defined in asm/io.h */
 enum {
- MT_UNCACHED = 4,
- MT_CACHECLEAN,
- MT_MINICLEAN,
+ MT_UNCACHED_RW = 4,
+ MT_CACHECLEAN_RO,
+ MT_MINICLEAN_RO,
 MT_LOW_VECTORS,
 MT_HIGH_VECTORS,
- MT_MEMORY_RWX,
+ __MT_MEMORY_RWX,
 MT_MEMORY_RW,
- MT_ROM,
- MT_MEMORY_RWX_NONCACHED,
+ MT_MEMORY_RX,
+ MT_ROM_RX,
+ MT_MEMORY_RW_NONCACHED,
+ MT_MEMORY_RX_NONCACHED,
 MT_MEMORY_RW_DTCM,
- MT_MEMORY_RWX_ITCM,
+ MT_MEMORY_RX_ITCM,
 MT_MEMORY_RW_SO,
 MT_MEMORY_DMA_READY,
 };
1833diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1834index f94784f..9a09a4a 100644
1835--- a/arch/arm/include/asm/outercache.h
1836+++ b/arch/arm/include/asm/outercache.h
1837@@ -35,7 +35,7 @@ struct outer_cache_fns {
1838 #endif
1839 void (*set_debug)(unsigned long);
1840 void (*resume)(void);
1841-};
1842+} __no_const;
1843
1844 extern struct outer_cache_fns outer_cache;
1845
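
__no_const here (and __do_const on struct dma_ops above) are annotations for the PaX constify gcc plugin: __do_const forces an ops structure into read-only memory, while __no_const exempts one that must legitimately be written at runtime, as outer_cache is during cache-controller probing. The assumed shape of the definitions, which compile away without the plugin:

/* Assumed definitions (the full patch carries these in
 * include/linux/compiler-gcc.h); no-ops when the plugin is absent. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* opt out of constification */
#define __do_const __attribute__((do_const))	/* force constification */
#else
#define __no_const
#define __do_const
#endif
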
1846diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1847index 4355f0e..cd9168e 100644
1848--- a/arch/arm/include/asm/page.h
1849+++ b/arch/arm/include/asm/page.h
1850@@ -23,6 +23,7 @@
1851
1852 #else
1853
1854+#include <linux/compiler.h>
1855 #include <asm/glue.h>
1856
1857 /*
1858@@ -114,7 +115,7 @@ struct cpu_user_fns {
1859 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1860 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1861 unsigned long vaddr, struct vm_area_struct *vma);
1862-};
1863+} __no_const;
1864
1865 #ifdef MULTI_USER
1866 extern struct cpu_user_fns cpu_user;
1867diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1868index 78a7793..e3dc06c 100644
1869--- a/arch/arm/include/asm/pgalloc.h
1870+++ b/arch/arm/include/asm/pgalloc.h
1871@@ -17,6 +17,7 @@
1872 #include <asm/processor.h>
1873 #include <asm/cacheflush.h>
1874 #include <asm/tlbflush.h>
1875+#include <asm/system_info.h>
1876
1877 #define check_pgt_cache() do { } while (0)
1878
1879@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1880 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1881 }
1882
1883+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884+{
1885+ pud_populate(mm, pud, pmd);
1886+}
1887+
1888 #else /* !CONFIG_ARM_LPAE */
1889
1890 /*
1891@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1892 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1893 #define pmd_free(mm, pmd) do { } while (0)
1894 #define pud_populate(mm,pmd,pte) BUG()
1895+#define pud_populate_kernel(mm,pmd,pte) BUG()
1896
1897 #endif /* CONFIG_ARM_LPAE */
1898
1899@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1900 __free_page(pte);
1901 }
1902
1903+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1904+{
1905+#ifdef CONFIG_ARM_LPAE
1906+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1907+#else
1908+ if (addr & SECTION_SIZE)
1909+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1910+ else
1911+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1912+#endif
1913+ flush_pmd_entry(pmdp);
1914+}
1915+
1916 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1917 pmdval_t prot)
1918 {
1919@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1920 static inline void
1921 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1922 {
1923- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1924+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1925 }
1926 #define pmd_pgtable(pmd) pmd_page(pmd)
1927
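
__section_update() exists because, without LPAE, a Linux pmd is really a pair of 1MB hardware section entries; bit 20 of the address (SECTION_SIZE) selects which half covers the target. A hypothetical caller flipping one section to privileged read-only might look as follows — treating APX + AP_WRITE as the classic ARMv6+ kernel-read-only encoding is an assumption here:

/* Hypothetical helper: mark the 1MB section covering addr as
 * privileged read-only (APX=1, AP=01: kernel RO, no user access). */
static void section_mark_ro(pmd_t *pmdp, unsigned long addr)
{
	__section_update(pmdp, addr, PMD_SECT_APX | PMD_SECT_AP_WRITE);
}
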
1928diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1929index 5cfba15..f415e1a 100644
1930--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1931+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1932@@ -20,12 +20,15 @@
1933 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1934 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1935 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1936+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1937 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1938 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1939 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1940+
1941 /*
1942 * - section
1943 */
1944+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1945 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1946 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1947 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1948@@ -37,6 +40,7 @@
1949 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1950 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1951 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1952+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1953
1954 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1955 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1956@@ -66,6 +70,7 @@
1957 * - extended small page/tiny page
1958 */
1959 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1960+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1961 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1962 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1963 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
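
PMD_PXNTABLE (bit 2 of a first-level table descriptor on ARMv7) and PTE_EXT_PXN provide Privileged-eXecute-Never: anything mapped through such an entry can never be executed at kernel privilege, even if it is executable for userland. A sketch of how a user page-table pointer would pick the bit up — hypothetical helper; the real plumbing goes through __supported_pmd_mask in the pgalloc.h hunk above:

/* Hypothetical: first-level entry pointing at a user PTE table, with
 * PXN set so the kernel can never execute pages mapped beneath it. */
static inline pmd_t mk_user_table(phys_addr_t pte_table)
{
	return __pmd(pte_table | PMD_TYPE_TABLE | PMD_PXNTABLE);
}
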
1964diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1965index 219ac88..73ec32a 100644
1966--- a/arch/arm/include/asm/pgtable-2level.h
1967+++ b/arch/arm/include/asm/pgtable-2level.h
1968@@ -126,6 +126,9 @@
1969 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1970 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1971
1972+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1973+#define L_PTE_PXN (_AT(pteval_t, 0))
1974+
1975 /*
1976 * These are the memory types, defined to be compatible with
1977 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1978diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1979index 626989f..9d67a33 100644
1980--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1981+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1982@@ -75,6 +75,7 @@
1983 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1984 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1985 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1986+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1987 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1988
1989 /*
1990diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1991index 85c60ad..b0bbd7e 100644
1992--- a/arch/arm/include/asm/pgtable-3level.h
1993+++ b/arch/arm/include/asm/pgtable-3level.h
1994@@ -82,6 +82,7 @@
1995 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1996 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1997 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1998+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1999 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2000 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2001 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2002@@ -95,6 +96,7 @@
2003 /*
2004 * To be used in assembly code with the upper page attributes.
2005 */
2006+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2007 #define L_PTE_XN_HIGH (1 << (54 - 32))
2008 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2009
2010diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2011index 7d59b52..27a12f8 100644
2012--- a/arch/arm/include/asm/pgtable.h
2013+++ b/arch/arm/include/asm/pgtable.h
2014@@ -33,6 +33,9 @@
2015 #include <asm/pgtable-2level.h>
2016 #endif
2017
2018+#define ktla_ktva(addr) (addr)
2019+#define ktva_ktla(addr) (addr)
2020+
2021 /*
2022 * Just any arbitrary offset to the start of the vmalloc VM area: the
2023 * current 8MB value just means that there will be a 8MB "hole" after the
2024@@ -48,6 +51,9 @@
2025 #define LIBRARY_TEXT_START 0x0c000000
2026
2027 #ifndef __ASSEMBLY__
2028+extern pteval_t __supported_pte_mask;
2029+extern pmdval_t __supported_pmd_mask;
2030+
2031 extern void __pte_error(const char *file, int line, pte_t);
2032 extern void __pmd_error(const char *file, int line, pmd_t);
2033 extern void __pgd_error(const char *file, int line, pgd_t);
2034@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2035 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2036 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2037
2038+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2039+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2040+
2041+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2042+#include <asm/domain.h>
2043+#include <linux/thread_info.h>
2044+#include <linux/preempt.h>
2045+
2046+static inline int test_domain(int domain, int domaintype)
2047+{
2048+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2049+}
2050+#endif
2051+
2052+#ifdef CONFIG_PAX_KERNEXEC
2053+static inline unsigned long pax_open_kernel(void) {
2054+#ifdef CONFIG_ARM_LPAE
2055+ /* TODO */
2056+#else
2057+ preempt_disable();
2058+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2059+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2060+#endif
2061+ return 0;
2062+}
2063+
2064+static inline unsigned long pax_close_kernel(void) {
2065+#ifdef CONFIG_ARM_LPAE
2066+ /* TODO */
2067+#else
2068+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2069+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2070+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2071+ preempt_enable_no_resched();
2072+#endif
2073+ return 0;
2074+}
2075+#else
2076+static inline unsigned long pax_open_kernel(void) { return 0; }
2077+static inline unsigned long pax_close_kernel(void) { return 0; }
2078+#endif
2079+
2080 /*
2081 * This is the lowest virtual address we can permit any user space
2082 * mapping to be mapped at. This is particularly important for
2083@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2084 /*
2085 * The pgprot_* and protection_map entries will be fixed up in runtime
2086 * to include the cachable and bufferable bits based on memory policy,
2087- * as well as any architecture dependent bits like global/ASID and SMP
2088- * shared mapping bits.
2089+ * as well as any architecture dependent bits like global/ASID, PXN,
2090+ * and SMP shared mapping bits.
2091 */
2092 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2093
2094@@ -262,7 +310,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2095 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2096 {
2097 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2098- L_PTE_NONE | L_PTE_VALID;
2099+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2100 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2101 return pte;
2102 }
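
pax_open_kernel()/pax_close_kernel() are the KERNEXEC counterparts of the userland pair: on non-LPAE CPUs they flip DOMAIN_KERNEL to the KERNEXEC domain type so otherwise read-only kernel memory becomes briefly writable, with preemption held off for the whole window. The canonical write-to-RO pattern, as used by the fiq.c, tcm.c and traps.c hunks later in this patch (sketch):

/* Sketch of the canonical pattern for patching read-only kernel data. */
static void patch_ro_data(void *ro_dest, const void *src, size_t len)
{
	pax_open_kernel();	/* disables preemption, opens DOMAIN_KERNEL */
	memcpy(ro_dest, src, len);
	pax_close_kernel();	/* restores the domain, re-enables preemption */
}
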
2103diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2104index c4ae171..ea0c0c2 100644
2105--- a/arch/arm/include/asm/psci.h
2106+++ b/arch/arm/include/asm/psci.h
2107@@ -29,7 +29,7 @@ struct psci_operations {
2108 int (*cpu_off)(struct psci_power_state state);
2109 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2110 int (*migrate)(unsigned long cpuid);
2111-};
2112+} __no_const;
2113
2114 extern struct psci_operations psci_ops;
2115 extern struct smp_operations psci_smp_ops;
2116diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2117index 22a3b9b..7f214ee 100644
2118--- a/arch/arm/include/asm/smp.h
2119+++ b/arch/arm/include/asm/smp.h
2120@@ -112,7 +112,7 @@ struct smp_operations {
2121 int (*cpu_disable)(unsigned int cpu);
2122 #endif
2123 #endif
2124-};
2125+} __no_const;
2126
2127 /*
2128 * set platform specific SMP operations
2129diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2130index 71a06b2..8bb9ae1 100644
2131--- a/arch/arm/include/asm/thread_info.h
2132+++ b/arch/arm/include/asm/thread_info.h
2133@@ -88,9 +88,9 @@ struct thread_info {
2134 .flags = 0, \
2135 .preempt_count = INIT_PREEMPT_COUNT, \
2136 .addr_limit = KERNEL_DS, \
2137- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2138- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2139- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2140+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2141+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2142+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2143 .restart_block = { \
2144 .fn = do_no_restart_syscall, \
2145 }, \
2146@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2147 #define TIF_SYSCALL_AUDIT 9
2148 #define TIF_SYSCALL_TRACEPOINT 10
2149 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2150-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2151+/* within 8 bits of TIF_SYSCALL_TRACE
2152+ * to meet flexible second operand requirements
2153+ */
2154+#define TIF_GRSEC_SETXID 12
2155+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2156 #define TIF_USING_IWMMXT 17
2157 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2158 #define TIF_RESTORE_SIGMASK 20
2159@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2160 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2161 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2162 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2163+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2164
2165 /* Checks for any syscall work in entry-common.S */
2166 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2167- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2168+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2169
2170 /*
2171 * Change these and you break ASM code in entry-common.S
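
The placement comment is about ARM immediate encoding: a data-processing immediate is an 8-bit value rotated right by an even amount, so the combined _TIF_SYSCALL_WORK constant is only usable as a single flexible second operand if all of its bits fit one such window. With TIF_SYSCALL_TRACE at bit 8, keeping TIF_GRSEC_SETXID at bit 12 satisfies that; a compile-time check (sketch, not in the patch) would make the constraint explicit:

/* Sketch: assert the syscall-work mask stays encodable as one ARM
 * immediate, i.e. within the 8-bit window starting at bit 8. */
static inline void check_tif_window(void)
{
	BUILD_BUG_ON(_TIF_SYSCALL_WORK & ~(0xffUL << 8));
}
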
2172diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2173index 72abdc5..35acac1 100644
2174--- a/arch/arm/include/asm/uaccess.h
2175+++ b/arch/arm/include/asm/uaccess.h
2176@@ -18,6 +18,7 @@
2177 #include <asm/domain.h>
2178 #include <asm/unified.h>
2179 #include <asm/compiler.h>
2180+#include <asm/pgtable.h>
2181
2182 #if __LINUX_ARM_ARCH__ < 6
2183 #include <asm-generic/uaccess-unaligned.h>
2184@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2185 static inline void set_fs(mm_segment_t fs)
2186 {
2187 current_thread_info()->addr_limit = fs;
2188- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2189+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2190 }
2191
2192 #define segment_eq(a,b) ((a) == (b))
2193
2194+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2195+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2196+
2197+static inline void pax_open_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2203+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+static inline void pax_close_userland(void)
2210+{
2211+
2212+#ifdef CONFIG_PAX_MEMORY_UDEREF
2213+ if (segment_eq(get_fs(), USER_DS)) {
2214+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2215+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2216+ }
2217+#endif
2218+
2219+}
2220+
2221 #define __addr_ok(addr) ({ \
2222 unsigned long flag; \
2223 __asm__("cmp %2, %0; movlo %0, #0" \
2224@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2225
2226 #define get_user(x,p) \
2227 ({ \
2228+ int __e; \
2229 might_fault(); \
2230- __get_user_check(x,p); \
2231+ pax_open_userland(); \
2232+ __e = __get_user_check(x,p); \
2233+ pax_close_userland(); \
2234+ __e; \
2235 })
2236
2237 extern int __put_user_1(void *, unsigned int);
2238@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2239
2240 #define put_user(x,p) \
2241 ({ \
2242+ int __e; \
2243 might_fault(); \
2244- __put_user_check(x,p); \
2245+ pax_open_userland(); \
2246+ __e = __put_user_check(x,p); \
2247+ pax_close_userland(); \
2248+ __e; \
2249 })
2250
2251 #else /* CONFIG_MMU */
2252@@ -220,6 +256,7 @@ static inline void set_fs(mm_segment_t fs)
2253
2254 #endif /* CONFIG_MMU */
2255
2256+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2257 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2258
2259 #define user_addr_max() \
2260@@ -237,13 +274,17 @@ static inline void set_fs(mm_segment_t fs)
2261 #define __get_user(x,ptr) \
2262 ({ \
2263 long __gu_err = 0; \
2264+ pax_open_userland(); \
2265 __get_user_err((x),(ptr),__gu_err); \
2266+ pax_close_userland(); \
2267 __gu_err; \
2268 })
2269
2270 #define __get_user_error(x,ptr,err) \
2271 ({ \
2272+ pax_open_userland(); \
2273 __get_user_err((x),(ptr),err); \
2274+ pax_close_userland(); \
2275 (void) 0; \
2276 })
2277
2278@@ -319,13 +360,17 @@ do { \
2279 #define __put_user(x,ptr) \
2280 ({ \
2281 long __pu_err = 0; \
2282+ pax_open_userland(); \
2283 __put_user_err((x),(ptr),__pu_err); \
2284+ pax_close_userland(); \
2285 __pu_err; \
2286 })
2287
2288 #define __put_user_error(x,ptr,err) \
2289 ({ \
2290+ pax_open_userland(); \
2291 __put_user_err((x),(ptr),err); \
2292+ pax_close_userland(); \
2293 (void) 0; \
2294 })
2295
2296@@ -425,11 +470,44 @@ do { \
2297
2298
2299 #ifdef CONFIG_MMU
2300-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2301-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2304+
2305+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2306+{
2307+ unsigned long ret;
2308+
2309+ check_object_size(to, n, false);
2310+ pax_open_userland();
2311+ ret = ___copy_from_user(to, from, n);
2312+ pax_close_userland();
2313+ return ret;
2314+}
2315+
2316+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2317+{
2318+ unsigned long ret;
2319+
2320+ check_object_size(from, n, true);
2321+ pax_open_userland();
2322+ ret = ___copy_to_user(to, from, n);
2323+ pax_close_userland();
2324+ return ret;
2325+}
2326+
2327 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2328-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2329+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2330 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2331+
2332+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2333+{
2334+ unsigned long ret;
2335+ pax_open_userland();
2336+ ret = ___clear_user(addr, n);
2337+ pax_close_userland();
2338+ return ret;
2339+}
2340+
2341 #else
2342 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2343 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2344@@ -438,6 +516,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2345
2346 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2347 {
2348+ if ((long)n < 0)
2349+ return n;
2350+
2351 if (access_ok(VERIFY_READ, from, n))
2352 n = __copy_from_user(to, from, n);
2353 else /* security hole - plug it */
2354@@ -447,6 +528,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2355
2356 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2357 {
2358+ if ((long)n < 0)
2359+ return n;
2360+
2361 if (access_ok(VERIFY_WRITE, to, n))
2362 n = __copy_to_user(to, from, n);
2363 return n;
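
The (long)n < 0 guard catches size values that went negative in an earlier signed computation and would otherwise arrive as enormous unsigned counts; returning n unchanged reports the whole request as "not copied" rather than copying ~4GB. The patched copy_from_user() pieces restated as one sketch:

/* Restated sketch of the hardened copy_from_user() above. */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	if ((long)n < 0)	/* overflowed signed size computation */
		return n;
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n); /* object-size check +
						    * userland open/close */
	else			/* security hole - plug it */
		memset(to, 0, n);
	return n;
}
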
2364diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2365index 5af0ed1..cea83883 100644
2366--- a/arch/arm/include/uapi/asm/ptrace.h
2367+++ b/arch/arm/include/uapi/asm/ptrace.h
2368@@ -92,7 +92,7 @@
2369 * ARMv7 groups of PSR bits
2370 */
2371 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2372-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2373+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2374 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2375 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2376
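
The mask change is a plain bit-position fix: ARM's ISA state lives in the J bit (CPSR bit 24) and the T bit (CPSR bit 5, value 0x20), so the old 0x01000010 covered bit 4 instead of the Thumb bit. A sanity sketch in terms of the constants this header already defines:

/* Sketch: the corrected mask is exactly the union of the J and T bits. */
#if PSR_ISET_MASK != (PSR_J_BIT | PSR_T_BIT)
#error "ISA-state mask must cover exactly the J and T bits"
#endif
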
2377diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2378index 85e664b..419a1cd 100644
2379--- a/arch/arm/kernel/armksyms.c
2380+++ b/arch/arm/kernel/armksyms.c
2381@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2382
2383 /* networking */
2384 EXPORT_SYMBOL(csum_partial);
2385-EXPORT_SYMBOL(csum_partial_copy_from_user);
2386+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2387 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2388 EXPORT_SYMBOL(__csum_ipv6_magic);
2389
2390@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2391 #ifdef CONFIG_MMU
2392 EXPORT_SYMBOL(copy_page);
2393
2394-EXPORT_SYMBOL(__copy_from_user);
2395-EXPORT_SYMBOL(__copy_to_user);
2396-EXPORT_SYMBOL(__clear_user);
2397+EXPORT_SYMBOL(___copy_from_user);
2398+EXPORT_SYMBOL(___copy_to_user);
2399+EXPORT_SYMBOL(___clear_user);
2400
2401 EXPORT_SYMBOL(__get_user_1);
2402 EXPORT_SYMBOL(__get_user_2);
2403diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2404index 1879e8d..b2207fc 100644
2405--- a/arch/arm/kernel/entry-armv.S
2406+++ b/arch/arm/kernel/entry-armv.S
2407@@ -47,6 +47,87 @@
2408 9997:
2409 .endm
2410
2411+ .macro pax_enter_kernel
2412+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2413+ @ make aligned space for saved DACR
2414+ sub sp, sp, #8
2415+ @ save regs
2416+ stmdb sp!, {r1, r2}
2417+ @ read DACR from cpu_domain into r1
2418+ mov r2, sp
2419+ @ assume 8K pages, since we have to split the immediate in two
2420+ bic r2, r2, #(0x1fc0)
2421+ bic r2, r2, #(0x3f)
2422+ ldr r1, [r2, #TI_CPU_DOMAIN]
2423+ @ store old DACR on stack
2424+ str r1, [sp, #8]
2425+#ifdef CONFIG_PAX_KERNEXEC
2426+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2427+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2428+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2429+#endif
2430+#ifdef CONFIG_PAX_MEMORY_UDEREF
2431+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2432+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2433+#endif
2434+ @ write r1 to current_thread_info()->cpu_domain
2435+ str r1, [r2, #TI_CPU_DOMAIN]
2436+ @ write r1 to DACR
2437+ mcr p15, 0, r1, c3, c0, 0
2438+ @ instruction sync
2439+ instr_sync
2440+ @ restore regs
2441+ ldmia sp!, {r1, r2}
2442+#endif
2443+ .endm
2444+
2445+ .macro pax_open_userland
2446+#ifdef CONFIG_PAX_MEMORY_UDEREF
2447+ @ save regs
2448+ stmdb sp!, {r0, r1}
2449+ @ read DACR from cpu_domain into r1
2450+ mov r0, sp
2451+ @ assume 8K pages, since we have to split the immediate in two
2452+ bic r0, r0, #(0x1fc0)
2453+ bic r0, r0, #(0x3f)
2454+ ldr r1, [r0, #TI_CPU_DOMAIN]
2455+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2456+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2457+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2458+ @ write r1 to current_thread_info()->cpu_domain
2459+ str r1, [r0, #TI_CPU_DOMAIN]
2460+ @ write r1 to DACR
2461+ mcr p15, 0, r1, c3, c0, 0
2462+ @ instruction sync
2463+ instr_sync
2464+ @ restore regs
2465+ ldmia sp!, {r0, r1}
2466+#endif
2467+ .endm
2468+
2469+ .macro pax_close_userland
2470+#ifdef CONFIG_PAX_MEMORY_UDEREF
2471+ @ save regs
2472+ stmdb sp!, {r0, r1}
2473+ @ read DACR from cpu_domain into r1
2474+ mov r0, sp
2475+ @ assume 8K pages, since we have to split the immediate in two
2476+ bic r0, r0, #(0x1fc0)
2477+ bic r0, r0, #(0x3f)
2478+ ldr r1, [r0, #TI_CPU_DOMAIN]
2479+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2480+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2481+ @ write r1 to current_thread_info()->cpu_domain
2482+ str r1, [r0, #TI_CPU_DOMAIN]
2483+ @ write r1 to DACR
2484+ mcr p15, 0, r1, c3, c0, 0
2485+ @ instruction sync
2486+ instr_sync
2487+ @ restore regs
2488+ ldmia sp!, {r0, r1}
2489+#endif
2490+ .endm
2491+
2492 .macro pabt_helper
2493 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2494 #ifdef MULTI_PABORT
2495@@ -89,11 +170,15 @@
2496 * Invalid mode handlers
2497 */
2498 .macro inv_entry, reason
2499+
2500+ pax_enter_kernel
2501+
2502 sub sp, sp, #S_FRAME_SIZE
2503 ARM( stmib sp, {r1 - lr} )
2504 THUMB( stmia sp, {r0 - r12} )
2505 THUMB( str sp, [sp, #S_SP] )
2506 THUMB( str lr, [sp, #S_LR] )
2507+
2508 mov r1, #\reason
2509 .endm
2510
2511@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2512 .macro svc_entry, stack_hole=0
2513 UNWIND(.fnstart )
2514 UNWIND(.save {r0 - pc} )
2515+
2516+ pax_enter_kernel
2517+
2518 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2519+
2520 #ifdef CONFIG_THUMB2_KERNEL
2521 SPFIX( str r0, [sp] ) @ temporarily saved
2522 SPFIX( mov r0, sp )
2523@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2524 ldmia r0, {r3 - r5}
2525 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2526 mov r6, #-1 @ "" "" "" ""
2527+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2528+ @ offset sp by 8 as done in pax_enter_kernel
2529+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2530+#else
2531 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2532+#endif
2533 SPFIX( addeq r2, r2, #4 )
2534 str r3, [sp, #-4]! @ save the "real" r0 copied
2535 @ from the exception stack
2536@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2537 .macro usr_entry
2538 UNWIND(.fnstart )
2539 UNWIND(.cantunwind ) @ don't unwind the user space
2540+
2541+ pax_enter_kernel_user
2542+
2543 sub sp, sp, #S_FRAME_SIZE
2544 ARM( stmib sp, {r1 - r12} )
2545 THUMB( stmia sp, {r0 - r12} )
2546@@ -416,7 +513,9 @@ __und_usr:
2547 tst r3, #PSR_T_BIT @ Thumb mode?
2548 bne __und_usr_thumb
2549 sub r4, r2, #4 @ ARM instr at LR - 4
2550+ pax_open_userland
2551 1: ldrt r0, [r4]
2552+ pax_close_userland
2553 ARM_BE8(rev r0, r0) @ little endian instruction
2554
2555 @ r0 = 32-bit ARM instruction which caused the exception
2556@@ -450,11 +549,15 @@ __und_usr_thumb:
2557 */
2558 .arch armv6t2
2559 #endif
2560+ pax_open_userland
2561 2: ldrht r5, [r4]
2562+ pax_close_userland
2563 ARM_BE8(rev16 r5, r5) @ little endian instruction
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 ARM_BE8(rev16 r0, r0) @ little endian instruction
2570 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2571 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2572@@ -484,7 +587,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: mov pc, r9
2577+4: pax_close_userland
2578+ mov pc, r9
2579 .popsection
2580 .pushsection __ex_table,"a"
2581 .long 1b, 4b
2582@@ -694,7 +798,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -703,7 +807,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
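
The repeated "bic rX, #0x1fc0; bic rX, #0x3f" pairs in the macros above clear the low 13 bits of sp in two instructions, since an ARM immediate cannot express 0x1fff directly; kernel stacks are THREAD_SIZE (8K) aligned, so the result is the thread_info base. The C equivalent, for reference:

/* What the two-BIC sequence computes: current_thread_info() derived
 * from sp (0x1fc0 | 0x3f == 0x1fff == THREAD_SIZE - 1 for 8K stacks). */
static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
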
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index a2dcafd..1048b5a 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -10,18 +10,46 @@
2605
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -411,6 +439,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+ * do this here to avoid a performance hit of wrapping the code above
2660+ * that directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 39f89fb..d612bd9 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -184,6 +184,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -203,6 +257,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6)
2741 ldr r0, [sp]
2742@@ -266,6 +323,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751 clrex @ clear the exclusive monitor
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 918875d..cd5fa27 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index f5f381d..a6f36a1 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -437,7 +437,7 @@ __enable_mmu:
2772 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2773 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2774 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2775- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2776+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2777 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2778 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2779 #endif
2780diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2781index 45e4781..8eac93d 100644
2782--- a/arch/arm/kernel/module.c
2783+++ b/arch/arm/kernel/module.c
2784@@ -38,12 +38,39 @@
2785 #endif
2786
2787 #ifdef CONFIG_MMU
2788-void *module_alloc(unsigned long size)
2789+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2790 {
2791+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2792+ return NULL;
2793 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2794- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2795+ GFP_KERNEL, prot, NUMA_NO_NODE,
2796 __builtin_return_address(0));
2797 }
2798+
2799+void *module_alloc(unsigned long size)
2800+{
2801+
2802+#ifdef CONFIG_PAX_KERNEXEC
2803+ return __module_alloc(size, PAGE_KERNEL);
2804+#else
2805+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2806+#endif
2807+
2808+}
2809+
2810+#ifdef CONFIG_PAX_KERNEXEC
2811+void module_free_exec(struct module *mod, void *module_region)
2812+{
2813+ module_free(mod, module_region);
2814+}
2815+EXPORT_SYMBOL(module_free_exec);
2816+
2817+void *module_alloc_exec(unsigned long size)
2818+{
2819+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2820+}
2821+EXPORT_SYMBOL(module_alloc_exec);
2822+#endif
2823 #endif
2824
2825 int
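
With module_alloc() no longer returning PAGE_KERNEL_EXEC memory under KERNEXEC, ordinary module allocations are never executable; executable module text comes from the new module_alloc_exec(), and writes to it go through pax_open_kernel(). A hypothetical loader-side sketch of the resulting split:

/* Hypothetical sketch: separate RW-data and RX-text allocations as a
 * KERNEXEC-aware module loader would obtain them. */
static int alloc_module_regions(unsigned long data_sz, unsigned long text_sz,
				void **data, void **text)
{
	*data = module_alloc(data_sz);		/* RW, never executable */
	*text = module_alloc_exec(text_sz);	/* RX; written only between
						 * pax_open_kernel()/close */
	if (!*data || !*text) {
		module_free(NULL, *data);	/* vfree(NULL) is a no-op */
		module_free_exec(NULL, *text);
		return -ENOMEM;
	}
	return 0;
}
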
2826diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2827index 07314af..c46655c 100644
2828--- a/arch/arm/kernel/patch.c
2829+++ b/arch/arm/kernel/patch.c
2830@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2831 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2832 int size;
2833
2834+ pax_open_kernel();
2835 if (thumb2 && __opcode_is_thumb16(insn)) {
2836 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2837 size = sizeof(u16);
2838@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2839 *(u32 *)addr = insn;
2840 size = sizeof(u32);
2841 }
2842+ pax_close_kernel();
2843
2844 flush_icache_range((uintptr_t)(addr),
2845 (uintptr_t)(addr) + size);
2846diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2847index 92f7b15..7048500 100644
2848--- a/arch/arm/kernel/process.c
2849+++ b/arch/arm/kernel/process.c
2850@@ -217,6 +217,7 @@ void machine_power_off(void)
2851
2852 if (pm_power_off)
2853 pm_power_off();
2854+ BUG();
2855 }
2856
2857 /*
2858@@ -230,7 +231,7 @@ void machine_power_off(void)
2859 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2860 * to use. Implementing such co-ordination would be essentially impossible.
2861 */
2862-void machine_restart(char *cmd)
2863+__noreturn void machine_restart(char *cmd)
2864 {
2865 local_irq_disable();
2866 smp_send_stop();
2867@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2868
2869 show_regs_print_info(KERN_DEFAULT);
2870
2871- print_symbol("PC is at %s\n", instruction_pointer(regs));
2872- print_symbol("LR is at %s\n", regs->ARM_lr);
2873+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2874+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2875 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2876 "sp : %08lx ip : %08lx fp : %08lx\n",
2877 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2878@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2879 return 0;
2880 }
2881
2882-unsigned long arch_randomize_brk(struct mm_struct *mm)
2883-{
2884- unsigned long range_end = mm->brk + 0x02000000;
2885- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2886-}
2887-
2888 #ifdef CONFIG_MMU
2889 #ifdef CONFIG_KUSER_HELPERS
2890 /*
2891@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2892
2893 static int __init gate_vma_init(void)
2894 {
2895- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2896+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2897 return 0;
2898 }
2899 arch_initcall(gate_vma_init);
2900@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2901
2902 const char *arch_vma_name(struct vm_area_struct *vma)
2903 {
2904- return is_gate_vma(vma) ? "[vectors]" :
2905- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2906- "[sigpage]" : NULL;
2907+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2908 }
2909
2910-static struct page *signal_page;
2911-extern struct page *get_signal_page(void);
2912-
2913 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2914 {
2915 struct mm_struct *mm = current->mm;
2916- unsigned long addr;
2917- int ret;
2918-
2919- if (!signal_page)
2920- signal_page = get_signal_page();
2921- if (!signal_page)
2922- return -ENOMEM;
2923
2924 down_write(&mm->mmap_sem);
2925- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2926- if (IS_ERR_VALUE(addr)) {
2927- ret = addr;
2928- goto up_fail;
2929- }
2930-
2931- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2932- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2933- &signal_page);
2934-
2935- if (ret == 0)
2936- mm->context.sigpage = addr;
2937-
2938- up_fail:
2939+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2940 up_write(&mm->mmap_sem);
2941- return ret;
2942+ return 0;
2943 }
2944 #endif
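
The replacement arch_setup_additional_pages() no longer installs a sigpage mapping at all; it only stores a randomized, word-aligned value. The arithmetic: get_random_int() % 0x3FFEFFE0 picks an offset in a window just under 1GB, and the final & 0xFFFFFFFC forces 4-byte alignment. Broken into steps (sketch):

/* Sketch of the range computation used above. */
static unsigned long pick_sigpage(void)
{
	unsigned long off = get_random_int() % 0x3FFEFFE0; /* < ~1GB window */

	return (PAGE_OFFSET + off) & 0xFFFFFFFC;	/* word-aligned */
}
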
2945diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2946index 4693188..4596c5e 100644
2947--- a/arch/arm/kernel/psci.c
2948+++ b/arch/arm/kernel/psci.c
2949@@ -24,7 +24,7 @@
2950 #include <asm/opcodes-virt.h>
2951 #include <asm/psci.h>
2952
2953-struct psci_operations psci_ops;
2954+struct psci_operations psci_ops __read_only;
2955
2956 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2957
2958diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2959index 0dd3b79..e018f64 100644
2960--- a/arch/arm/kernel/ptrace.c
2961+++ b/arch/arm/kernel/ptrace.c
2962@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2963 return current_thread_info()->syscall;
2964 }
2965
2966+#ifdef CONFIG_GRKERNSEC_SETXID
2967+extern void gr_delayed_cred_worker(void);
2968+#endif
2969+
2970 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2971 {
2972 current_thread_info()->syscall = scno;
2973
2974+#ifdef CONFIG_GRKERNSEC_SETXID
2975+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2976+ gr_delayed_cred_worker();
2977+#endif
2978+
2979 /* Do the secure computing check first; failures should be fast. */
2980 if (secure_computing(scno) == -1)
2981 return -1;
2982diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2983index 1e8b030..37c3022 100644
2984--- a/arch/arm/kernel/setup.c
2985+++ b/arch/arm/kernel/setup.c
2986@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
2987 unsigned int elf_hwcap __read_mostly;
2988 EXPORT_SYMBOL(elf_hwcap);
2989
2990+pteval_t __supported_pte_mask __read_only;
2991+pmdval_t __supported_pmd_mask __read_only;
2992
2993 #ifdef MULTI_CPU
2994-struct processor processor __read_mostly;
2995+struct processor processor __read_only;
2996 #endif
2997 #ifdef MULTI_TLB
2998-struct cpu_tlb_fns cpu_tlb __read_mostly;
2999+struct cpu_tlb_fns cpu_tlb __read_only;
3000 #endif
3001 #ifdef MULTI_USER
3002-struct cpu_user_fns cpu_user __read_mostly;
3003+struct cpu_user_fns cpu_user __read_only;
3004 #endif
3005 #ifdef MULTI_CACHE
3006-struct cpu_cache_fns cpu_cache __read_mostly;
3007+struct cpu_cache_fns cpu_cache __read_only;
3008 #endif
3009 #ifdef CONFIG_OUTER_CACHE
3010-struct outer_cache_fns outer_cache __read_mostly;
3011+struct outer_cache_fns outer_cache __read_only;
3012 EXPORT_SYMBOL(outer_cache);
3013 #endif
3014
3015@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
3016 asm("mrc p15, 0, %0, c0, c1, 4"
3017 : "=r" (mmfr0));
3018 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3019- (mmfr0 & 0x000000f0) >= 0x00000030)
3020+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3021 cpu_arch = CPU_ARCH_ARMv7;
3022- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3023+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3024+ __supported_pte_mask |= L_PTE_PXN;
3025+ __supported_pmd_mask |= PMD_PXNTABLE;
3026+ }
3027+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3028 (mmfr0 & 0x000000f0) == 0x00000020)
3029 cpu_arch = CPU_ARCH_ARMv6;
3030 else
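
The new branch reads the VMSA support field, ID_MMFR0[3:0]; per the ARM ARM, value 4 denotes VMSAv7 with the PXN extension and 5 adds the long-descriptor format, so on either the kernel enables the PXN bits globally through the two mask variables. The detection logic in isolation:

/* Sketch: probe ID_MMFR0 and enable PXN when the VMSA field reports
 * a PXN-capable VMSAv7 implementation (values 4 or 5). */
static void __init probe_pxn(void)
{
	u32 mmfr0;

	asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));
	if ((mmfr0 & 0xf) == 0x4 || (mmfr0 & 0xf) == 0x5) {
		__supported_pte_mask |= L_PTE_PXN;
		__supported_pmd_mask |= PMD_PXNTABLE;
	}
}
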
3031diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3032index 04d6388..5115238 100644
3033--- a/arch/arm/kernel/signal.c
3034+++ b/arch/arm/kernel/signal.c
3035@@ -23,8 +23,6 @@
3036
3037 extern const unsigned long sigreturn_codes[7];
3038
3039-static unsigned long signal_return_offset;
3040-
3041 #ifdef CONFIG_CRUNCH
3042 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3043 {
3044@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3045 * except when the MPU has protected the vectors
3046 * page from PL0
3047 */
3048- retcode = mm->context.sigpage + signal_return_offset +
3049- (idx << 2) + thumb;
3050+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3051 } else
3052 #endif
3053 {
3054@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3055 } while (thread_flags & _TIF_WORK_MASK);
3056 return 0;
3057 }
3058-
3059-struct page *get_signal_page(void)
3060-{
3061- unsigned long ptr;
3062- unsigned offset;
3063- struct page *page;
3064- void *addr;
3065-
3066- page = alloc_pages(GFP_KERNEL, 0);
3067-
3068- if (!page)
3069- return NULL;
3070-
3071- addr = page_address(page);
3072-
3073- /* Give the signal return code some randomness */
3074- offset = 0x200 + (get_random_int() & 0x7fc);
3075- signal_return_offset = offset;
3076-
3077- /*
3078- * Copy signal return handlers into the vector page, and
3079- * set sigreturn to be a pointer to these.
3080- */
3081- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3082-
3083- ptr = (unsigned long)addr + offset;
3084- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3085-
3086- return page;
3087-}
3088diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3089index b7b4c86..47c4f77 100644
3090--- a/arch/arm/kernel/smp.c
3091+++ b/arch/arm/kernel/smp.c
3092@@ -73,7 +73,7 @@ enum ipi_msg_type {
3093
3094 static DECLARE_COMPLETION(cpu_running);
3095
3096-static struct smp_operations smp_ops;
3097+static struct smp_operations smp_ops __read_only;
3098
3099 void __init smp_set_ops(struct smp_operations *ops)
3100 {
3101diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3102index 7a3be1d..b00c7de 100644
3103--- a/arch/arm/kernel/tcm.c
3104+++ b/arch/arm/kernel/tcm.c
3105@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3106 .virtual = ITCM_OFFSET,
3107 .pfn = __phys_to_pfn(ITCM_OFFSET),
3108 .length = 0,
3109- .type = MT_MEMORY_RWX_ITCM,
3110+ .type = MT_MEMORY_RX_ITCM,
3111 }
3112 };
3113
3114@@ -267,7 +267,9 @@ no_dtcm:
3115 start = &__sitcm_text;
3116 end = &__eitcm_text;
3117 ram = &__itcm_start;
3118+ pax_open_kernel();
3119 memcpy(start, ram, itcm_code_sz);
3120+ pax_close_kernel();
3121 pr_debug("CPU ITCM: copied code from %p - %p\n",
3122 start, end);
3123 itcm_present = true;
3124diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3125index 172ee18..ce4ec3d 100644
3126--- a/arch/arm/kernel/traps.c
3127+++ b/arch/arm/kernel/traps.c
3128@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3129 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3130 {
3131 #ifdef CONFIG_KALLSYMS
3132- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3133+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3134 #else
3135 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3136 #endif
3137@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3138 static int die_owner = -1;
3139 static unsigned int die_nest_count;
3140
3141+extern void gr_handle_kernel_exploit(void);
3142+
3143 static unsigned long oops_begin(void)
3144 {
3145 int cpu;
3146@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3147 panic("Fatal exception in interrupt");
3148 if (panic_on_oops)
3149 panic("Fatal exception");
3150+
3151+ gr_handle_kernel_exploit();
3152+
3153 if (signr)
3154 do_exit(signr);
3155 }
3156@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3157 * The user helper at 0xffff0fe0 must be used instead.
3158 * (see entry-armv.S for details)
3159 */
3160+ pax_open_kernel();
3161 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3162+ pax_close_kernel();
3163 }
3164 return 0;
3165
3166@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
3167 kuser_init(vectors_base);
3168
3169 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3170- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3171+
3172+#ifndef CONFIG_PAX_MEMORY_UDEREF
3173+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3174+#endif
3175+
3176 #else /* ifndef CONFIG_CPU_V7M */
3177 /*
3178 * on V7-M there is no need to copy the vector table to a dedicated
3179diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3180index 7bcee5c..e2f3249 100644
3181--- a/arch/arm/kernel/vmlinux.lds.S
3182+++ b/arch/arm/kernel/vmlinux.lds.S
3183@@ -8,7 +8,11 @@
3184 #include <asm/thread_info.h>
3185 #include <asm/memory.h>
3186 #include <asm/page.h>
3187-
3188+
3189+#ifdef CONFIG_PAX_KERNEXEC
3190+#include <asm/pgtable.h>
3191+#endif
3192+
3193 #define PROC_INFO \
3194 . = ALIGN(4); \
3195 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3196@@ -34,7 +38,7 @@
3197 #endif
3198
3199 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3200- defined(CONFIG_GENERIC_BUG)
3201+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3202 #define ARM_EXIT_KEEP(x) x
3203 #define ARM_EXIT_DISCARD(x)
3204 #else
3205@@ -90,6 +94,11 @@ SECTIONS
3206 _text = .;
3207 HEAD_TEXT
3208 }
3209+
3210+#ifdef CONFIG_PAX_KERNEXEC
3211+ . = ALIGN(1<<SECTION_SHIFT);
3212+#endif
3213+
3214 .text : { /* Real text segment */
3215 _stext = .; /* Text and read-only data */
3216 __exception_text_start = .;
3217@@ -112,6 +121,8 @@ SECTIONS
3218 ARM_CPU_KEEP(PROC_INFO)
3219 }
3220
3221+ _etext = .; /* End of text section */
3222+
3223 RO_DATA(PAGE_SIZE)
3224
3225 . = ALIGN(4);
3226@@ -142,7 +153,9 @@ SECTIONS
3227
3228 NOTES
3229
3230- _etext = .; /* End of text and rodata section */
3231+#ifdef CONFIG_PAX_KERNEXEC
3232+ . = ALIGN(1<<SECTION_SHIFT);
3233+#endif
3234
3235 #ifndef CONFIG_XIP_KERNEL
3236 . = ALIGN(PAGE_SIZE);
3237@@ -220,6 +233,11 @@ SECTIONS
3238 . = PAGE_OFFSET + TEXT_OFFSET;
3239 #else
3240 __init_end = .;
3241+
3242+#ifdef CONFIG_PAX_KERNEXEC
3243+ . = ALIGN(1<<SECTION_SHIFT);
3244+#endif
3245+
3246 . = ALIGN(THREAD_SIZE);
3247 __data_loc = .;
3248 #endif
3249diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3250index bd18bb8..87ede26 100644
3251--- a/arch/arm/kvm/arm.c
3252+++ b/arch/arm/kvm/arm.c
3253@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3254 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3255
3256 /* The VMID used in the VTTBR */
3257-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3258+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3259 static u8 kvm_next_vmid;
3260 static DEFINE_SPINLOCK(kvm_vmid_lock);
3261
3262@@ -408,7 +408,7 @@ void force_vm_exit(const cpumask_t *mask)
3263 */
3264 static bool need_new_vmid_gen(struct kvm *kvm)
3265 {
3266- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3267+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3268 }
3269
3270 /**
3271@@ -441,7 +441,7 @@ static void update_vttbr(struct kvm *kvm)
3272
3273 /* First user of a new VMID generation? */
3274 if (unlikely(kvm_next_vmid == 0)) {
3275- atomic64_inc(&kvm_vmid_gen);
3276+ atomic64_inc_unchecked(&kvm_vmid_gen);
3277 kvm_next_vmid = 1;
3278
3279 /*
3280@@ -458,7 +458,7 @@ static void update_vttbr(struct kvm *kvm)
3281 kvm_call_hyp(__kvm_flush_vm_context);
3282 }
3283
3284- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3285+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3286 kvm->arch.vmid = kvm_next_vmid;
3287 kvm_next_vmid++;
3288
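
atomic64_unchecked_t is the PAX_REFCOUNT escape hatch: checked atomics trap on overflow to kill refcount-overflow exploits, so counters that may legitimately wrap, like this VMID generation number, switch to the *_unchecked variants with the original non-trapping semantics. A sketch using only the _unchecked calls visible in this hunk:

/* Sketch: a generation counter where wrap-around is harmless, kept on
 * the non-trapping _unchecked atomics. */
static atomic64_unchecked_t gen_ctr = ATOMIC64_INIT(1);

static void bump_generation(void)
{
	atomic64_inc_unchecked(&gen_ctr);	/* overflow tolerated */
}

static u64 current_generation(void)
{
	return (u64)atomic64_read_unchecked(&gen_ctr);
}
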
3289diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3290index 14a0d98..7771a7d 100644
3291--- a/arch/arm/lib/clear_user.S
3292+++ b/arch/arm/lib/clear_user.S
3293@@ -12,14 +12,14 @@
3294
3295 .text
3296
3297-/* Prototype: int __clear_user(void *addr, size_t sz)
3298+/* Prototype: int ___clear_user(void *addr, size_t sz)
3299 * Purpose : clear some user memory
3300 * Params : addr - user memory address to clear
3301 * : sz - number of bytes to clear
3302 * Returns : number of bytes NOT cleared
3303 */
3304 ENTRY(__clear_user_std)
3305-WEAK(__clear_user)
3306+WEAK(___clear_user)
3307 stmfd sp!, {r1, lr}
3308 mov r2, #0
3309 cmp r1, #4
3310@@ -44,7 +44,7 @@ WEAK(__clear_user)
3311 USER( strnebt r2, [r0])
3312 mov r0, #0
3313 ldmfd sp!, {r1, pc}
3314-ENDPROC(__clear_user)
3315+ENDPROC(___clear_user)
3316 ENDPROC(__clear_user_std)
3317
3318 .pushsection .fixup,"ax"
3319diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3320index 66a477a..bee61d3 100644
3321--- a/arch/arm/lib/copy_from_user.S
3322+++ b/arch/arm/lib/copy_from_user.S
3323@@ -16,7 +16,7 @@
3324 /*
3325 * Prototype:
3326 *
3327- * size_t __copy_from_user(void *to, const void *from, size_t n)
3328+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3329 *
3330 * Purpose:
3331 *
3332@@ -84,11 +84,11 @@
3333
3334 .text
3335
3336-ENTRY(__copy_from_user)
3337+ENTRY(___copy_from_user)
3338
3339 #include "copy_template.S"
3340
3341-ENDPROC(__copy_from_user)
3342+ENDPROC(___copy_from_user)
3343
3344 .pushsection .fixup,"ax"
3345 .align 0
3346diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3347index 6ee2f67..d1cce76 100644
3348--- a/arch/arm/lib/copy_page.S
3349+++ b/arch/arm/lib/copy_page.S
3350@@ -10,6 +10,7 @@
3351 * ASM optimised string functions
3352 */
3353 #include <linux/linkage.h>
3354+#include <linux/const.h>
3355 #include <asm/assembler.h>
3356 #include <asm/asm-offsets.h>
3357 #include <asm/cache.h>
3358diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3359index d066df6..df28194 100644
3360--- a/arch/arm/lib/copy_to_user.S
3361+++ b/arch/arm/lib/copy_to_user.S
3362@@ -16,7 +16,7 @@
3363 /*
3364 * Prototype:
3365 *
3366- * size_t __copy_to_user(void *to, const void *from, size_t n)
3367+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3368 *
3369 * Purpose:
3370 *
3371@@ -88,11 +88,11 @@
3372 .text
3373
3374 ENTRY(__copy_to_user_std)
3375-WEAK(__copy_to_user)
3376+WEAK(___copy_to_user)
3377
3378 #include "copy_template.S"
3379
3380-ENDPROC(__copy_to_user)
3381+ENDPROC(___copy_to_user)
3382 ENDPROC(__copy_to_user_std)
3383
3384 .pushsection .fixup,"ax"
3385diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3386index 7d08b43..f7ca7ea 100644
3387--- a/arch/arm/lib/csumpartialcopyuser.S
3388+++ b/arch/arm/lib/csumpartialcopyuser.S
3389@@ -57,8 +57,8 @@
3390 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3391 */
3392
3393-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3394-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3395+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3396+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3397
3398 #include "csumpartialcopygeneric.S"
3399
3400diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3401index 5306de3..aed6d03 100644
3402--- a/arch/arm/lib/delay.c
3403+++ b/arch/arm/lib/delay.c
3404@@ -28,7 +28,7 @@
3405 /*
3406 * Default to the loop-based delay implementation.
3407 */
3408-struct arm_delay_ops arm_delay_ops = {
3409+struct arm_delay_ops arm_delay_ops __read_only = {
3410 .delay = __loop_delay,
3411 .const_udelay = __loop_const_udelay,
3412 .udelay = __loop_udelay,
3413diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3414index 3e58d71..029817c 100644
3415--- a/arch/arm/lib/uaccess_with_memcpy.c
3416+++ b/arch/arm/lib/uaccess_with_memcpy.c
3417@@ -136,7 +136,7 @@ out:
3418 }
3419
3420 unsigned long
3421-__copy_to_user(void __user *to, const void *from, unsigned long n)
3422+___copy_to_user(void __user *to, const void *from, unsigned long n)
3423 {
3424 /*
3425 * This test is stubbed out of the main function above to keep
3426@@ -190,7 +190,7 @@ out:
3427 return n;
3428 }
3429
3430-unsigned long __clear_user(void __user *addr, unsigned long n)
3431+unsigned long ___clear_user(void __user *addr, unsigned long n)
3432 {
3433 /* See rational for this in __copy_to_user() above. */
3434 if (n < 64)
3435diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3436index f7ca97b..3d7e719 100644
3437--- a/arch/arm/mach-at91/setup.c
3438+++ b/arch/arm/mach-at91/setup.c
3439@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3440
3441 desc->pfn = __phys_to_pfn(base);
3442 desc->length = length;
3443- desc->type = MT_MEMORY_RWX_NONCACHED;
3444+ desc->type = MT_MEMORY_RW_NONCACHED;
3445
3446 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3447 base, length, desc->virtual);
3448diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3449index f3407a5..bd4256f 100644
3450--- a/arch/arm/mach-kirkwood/common.c
3451+++ b/arch/arm/mach-kirkwood/common.c
3452@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3453 clk_gate_ops.disable(hw);
3454 }
3455
3456-static struct clk_ops clk_gate_fn_ops;
3457+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3458+{
3459+ return clk_gate_ops.is_enabled(hw);
3460+}
3461+
3462+static struct clk_ops clk_gate_fn_ops = {
3463+ .enable = clk_gate_fn_enable,
3464+ .disable = clk_gate_fn_disable,
3465+ .is_enabled = clk_gate_fn_is_enabled,
3466+};
3467
3468 static struct clk __init *clk_register_gate_fn(struct device *dev,
3469 const char *name,
3470@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3471 gate_fn->fn_en = fn_en;
3472 gate_fn->fn_dis = fn_dis;
3473
3474- /* ops is the gate ops, but with our enable/disable functions */
3475- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3476- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3477- clk_gate_fn_ops = clk_gate_ops;
3478- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3479- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3480- }
3481-
3482 clk = clk_register(dev, &gate_fn->gate.hw);
3483
3484 if (IS_ERR(clk))
3485diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3486index aead77a..a2253fa 100644
3487--- a/arch/arm/mach-omap2/board-n8x0.c
3488+++ b/arch/arm/mach-omap2/board-n8x0.c
3489@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3490 }
3491 #endif
3492
3493-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3494+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3495 .late_init = n8x0_menelaus_late_init,
3496 };
3497
3498diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3499index ab43755..ccfa231 100644
3500--- a/arch/arm/mach-omap2/gpmc.c
3501+++ b/arch/arm/mach-omap2/gpmc.c
3502@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3503 };
3504
3505 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3506-static struct irq_chip gpmc_irq_chip;
3507 static int gpmc_irq_start;
3508
3509 static struct resource gpmc_mem_root;
3510@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3511
3512 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3513
3514+static struct irq_chip gpmc_irq_chip = {
3515+ .name = "gpmc",
3516+ .irq_startup = gpmc_irq_noop_ret,
3517+ .irq_enable = gpmc_irq_enable,
3518+ .irq_disable = gpmc_irq_disable,
3519+ .irq_shutdown = gpmc_irq_noop,
3520+ .irq_ack = gpmc_irq_noop,
3521+ .irq_mask = gpmc_irq_noop,
3522+ .irq_unmask = gpmc_irq_noop,
3523+
3524+};
3525+
3526 static int gpmc_setup_irq(void)
3527 {
3528 int i;
3529@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3530 return gpmc_irq_start;
3531 }
3532
3533- gpmc_irq_chip.name = "gpmc";
3534- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3535- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3536- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3537- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3538- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3539- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3540- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3541-
3542 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3543 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3544
3545diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3546index 667915d..2ee1219 100644
3547--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3548+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3549@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3550 int (*finish_suspend)(unsigned long cpu_state);
3551 void (*resume)(void);
3552 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3553-};
3554+} __no_const;
3555
3556 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3557 static struct powerdomain *mpuss_pd;
3558@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3559 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3560 {}
3561
3562-struct cpu_pm_ops omap_pm_ops = {
3563+static struct cpu_pm_ops omap_pm_ops __read_only = {
3564 .finish_suspend = default_finish_suspend,
3565 .resume = dummy_cpu_resume,
3566 .scu_prepare = dummy_scu_prepare,
3567diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3568index 3664562..72f85c6 100644
3569--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3570+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3571@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3572 return NOTIFY_OK;
3573 }
3574
3575-static struct notifier_block __refdata irq_hotplug_notifier = {
3576+static struct notifier_block irq_hotplug_notifier = {
3577 .notifier_call = irq_cpu_hotplug_notify,
3578 };
3579
3580diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3581index 01ef59d..32ae28a8 100644
3582--- a/arch/arm/mach-omap2/omap_device.c
3583+++ b/arch/arm/mach-omap2/omap_device.c
3584@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3585 struct platform_device __init *omap_device_build(const char *pdev_name,
3586 int pdev_id,
3587 struct omap_hwmod *oh,
3588- void *pdata, int pdata_len)
3589+ const void *pdata, int pdata_len)
3590 {
3591 struct omap_hwmod *ohs[] = { oh };
3592
3593@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3594 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3595 int pdev_id,
3596 struct omap_hwmod **ohs,
3597- int oh_cnt, void *pdata,
3598+ int oh_cnt, const void *pdata,
3599 int pdata_len)
3600 {
3601 int ret = -ENOMEM;
3602diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3603index 78c02b3..c94109a 100644
3604--- a/arch/arm/mach-omap2/omap_device.h
3605+++ b/arch/arm/mach-omap2/omap_device.h
3606@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3607 /* Core code interface */
3608
3609 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3610- struct omap_hwmod *oh, void *pdata,
3611+ struct omap_hwmod *oh, const void *pdata,
3612 int pdata_len);
3613
3614 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3615 struct omap_hwmod **oh, int oh_cnt,
3616- void *pdata, int pdata_len);
3617+ const void *pdata, int pdata_len);
3618
3619 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3620 struct omap_hwmod **ohs, int oh_cnt);
3621diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3622index 66c60fe..c78950d 100644
3623--- a/arch/arm/mach-omap2/omap_hwmod.c
3624+++ b/arch/arm/mach-omap2/omap_hwmod.c
3625@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3626 int (*init_clkdm)(struct omap_hwmod *oh);
3627 void (*update_context_lost)(struct omap_hwmod *oh);
3628 int (*get_context_lost)(struct omap_hwmod *oh);
3629-};
3630+} __no_const;
3631
3632 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3633-static struct omap_hwmod_soc_ops soc_ops;
3634+static struct omap_hwmod_soc_ops soc_ops __read_only;
3635
3636 /* omap_hwmod_list contains all registered struct omap_hwmods */
3637 static LIST_HEAD(omap_hwmod_list);
3638diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3639index 95fee54..cfa9cf1 100644
3640--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3641+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3642@@ -10,6 +10,7 @@
3643
3644 #include <linux/kernel.h>
3645 #include <linux/init.h>
3646+#include <asm/pgtable.h>
3647
3648 #include "powerdomain.h"
3649
3650@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3651
3652 void __init am43xx_powerdomains_init(void)
3653 {
3654- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3655+ pax_open_kernel();
3656+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3657+ pax_close_kernel();
3658 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3659 pwrdm_register_pwrdms(powerdomains_am43xx);
3660 pwrdm_complete_init();
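
The powerdomains43xx hunk above shows the PaX idiom for the rare case where constified data must still be written once during init: the store is bracketed by pax_open_kernel()/pax_close_kernel() and goes through a *(void **)& cast to defeat the const qualifier. A rough userspace analogue using mprotect(), assuming a page-aligned allocation and omitting error handling:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static long pagesz;

static void *page_of(void *p)
{
        return (void *)((uintptr_t)p & ~((uintptr_t)pagesz - 1));
}

/* stand-ins for pax_open_kernel()/pax_close_kernel() */
static void open_rw(void *p)  { mprotect(page_of(p), pagesz, PROT_READ | PROT_WRITE); }
static void close_rw(void *p) { mprotect(page_of(p), pagesz, PROT_READ); }

static void hook(void) { puts("hook called"); }

int main(void)
{
        pagesz = sysconf(_SC_PAGESIZE);

        /* an "ops" slot that we immediately make read-only */
        void (**slot)(void) = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        mprotect(slot, pagesz, PROT_READ);

        open_rw(slot);          /* lift write protection...     */
        *slot = hook;           /* ...for exactly one update... */
        close_rw(slot);         /* ...then restore it           */

        (*slot)();
        return 0;
}
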
3661diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3662index d15c7bb..b2d1f0c 100644
3663--- a/arch/arm/mach-omap2/wd_timer.c
3664+++ b/arch/arm/mach-omap2/wd_timer.c
3665@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3666 struct omap_hwmod *oh;
3667 char *oh_name = "wd_timer2";
3668 char *dev_name = "omap_wdt";
3669- struct omap_wd_timer_platform_data pdata;
3670+ static struct omap_wd_timer_platform_data pdata = {
3671+ .read_reset_sources = prm_read_reset_sources
3672+ };
3673
3674 if (!cpu_class_is_omap2() || of_have_populated_dt())
3675 return 0;
3676@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3677 return -EINVAL;
3678 }
3679
3680- pdata.read_reset_sources = prm_read_reset_sources;
3681-
3682 pdev = omap_device_build(dev_name, id, oh, &pdata,
3683 sizeof(struct omap_wd_timer_platform_data));
3684 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3685diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3686index b82dcae..44ee5b6 100644
3687--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3688+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3689@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3690 bool entered_lp2 = false;
3691
3692 if (tegra_pending_sgi())
3693- ACCESS_ONCE(abort_flag) = true;
3694+ ACCESS_ONCE_RW(abort_flag) = true;
3695
3696 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3697
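
ACCESS_ONCE_RW above is the PaX-specific split of the stock ACCESS_ONCE idiom: in the PaX tree the read form gains a const qualifier and ACCESS_ONCE_RW keeps the writable cast, so the constification plugins can tell deliberate stores from reads. For reference, the stock idiom is just a volatile-qualified access that forces the compiler to emit exactly one load or store:

#include <stdio.h>

/* the upstream definition; PaX's ACCESS_ONCE_RW is this same writable form */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int flag;

int main(void)
{
        ACCESS_ONCE(flag) = 1;             /* exactly one store */
        printf("%d\n", ACCESS_ONCE(flag)); /* exactly one load  */
        return 0;
}
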
3698diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3699index 2dea8b5..6499da2 100644
3700--- a/arch/arm/mach-ux500/setup.h
3701+++ b/arch/arm/mach-ux500/setup.h
3702@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3703 .type = MT_DEVICE, \
3704 }
3705
3706-#define __MEM_DEV_DESC(x, sz) { \
3707- .virtual = IO_ADDRESS(x), \
3708- .pfn = __phys_to_pfn(x), \
3709- .length = sz, \
3710- .type = MT_MEMORY_RWX, \
3711-}
3712-
3713 extern struct smp_operations ux500_smp_ops;
3714 extern void ux500_cpu_die(unsigned int cpu);
3715
3716diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3717index ca8ecde..58ba893 100644
3718--- a/arch/arm/mm/Kconfig
3719+++ b/arch/arm/mm/Kconfig
3720@@ -446,6 +446,7 @@ config CPU_32v5
3721
3722 config CPU_32v6
3723 bool
3724+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3725 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3726
3727 config CPU_32v6K
3728@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3729
3730 config CPU_USE_DOMAINS
3731 bool
3732+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3733 help
3734 This option enables or disables the use of domain switching
3735 via the set_fs() function.
3736@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
3737 config KUSER_HELPERS
3738 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3739 default y
3740+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3741 help
3742 Warning: disabling this option may break user programs.
3743
3744@@ -811,7 +814,7 @@ config KUSER_HELPERS
3745 See Documentation/arm/kernel_user_helpers.txt for details.
3746
3747 However, the fixed address nature of these helpers can be used
3748- by ROP (return orientated programming) authors when creating
3749+ by ROP (Return Oriented Programming) authors when creating
3750 exploits.
3751
3752 If all of the binaries and libraries which run on your platform
3753diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3754index 9240364..a2b8cf3 100644
3755--- a/arch/arm/mm/alignment.c
3756+++ b/arch/arm/mm/alignment.c
3757@@ -212,10 +212,12 @@ union offset_union {
3758 #define __get16_unaligned_check(ins,val,addr) \
3759 do { \
3760 unsigned int err = 0, v, a = addr; \
3761+ pax_open_userland(); \
3762 __get8_unaligned_check(ins,v,a,err); \
3763 val = v << ((BE) ? 8 : 0); \
3764 __get8_unaligned_check(ins,v,a,err); \
3765 val |= v << ((BE) ? 0 : 8); \
3766+ pax_close_userland(); \
3767 if (err) \
3768 goto fault; \
3769 } while (0)
3770@@ -229,6 +231,7 @@ union offset_union {
3771 #define __get32_unaligned_check(ins,val,addr) \
3772 do { \
3773 unsigned int err = 0, v, a = addr; \
3774+ pax_open_userland(); \
3775 __get8_unaligned_check(ins,v,a,err); \
3776 val = v << ((BE) ? 24 : 0); \
3777 __get8_unaligned_check(ins,v,a,err); \
3778@@ -237,6 +240,7 @@ union offset_union {
3779 val |= v << ((BE) ? 8 : 16); \
3780 __get8_unaligned_check(ins,v,a,err); \
3781 val |= v << ((BE) ? 0 : 24); \
3782+ pax_close_userland(); \
3783 if (err) \
3784 goto fault; \
3785 } while (0)
3786@@ -250,6 +254,7 @@ union offset_union {
3787 #define __put16_unaligned_check(ins,val,addr) \
3788 do { \
3789 unsigned int err = 0, v = val, a = addr; \
3790+ pax_open_userland(); \
3791 __asm__( FIRST_BYTE_16 \
3792 ARM( "1: "ins" %1, [%2], #1\n" ) \
3793 THUMB( "1: "ins" %1, [%2]\n" ) \
3794@@ -269,6 +274,7 @@ union offset_union {
3795 " .popsection\n" \
3796 : "=r" (err), "=&r" (v), "=&r" (a) \
3797 : "0" (err), "1" (v), "2" (a)); \
3798+ pax_close_userland(); \
3799 if (err) \
3800 goto fault; \
3801 } while (0)
3802@@ -282,6 +288,7 @@ union offset_union {
3803 #define __put32_unaligned_check(ins,val,addr) \
3804 do { \
3805 unsigned int err = 0, v = val, a = addr; \
3806+ pax_open_userland(); \
3807 __asm__( FIRST_BYTE_32 \
3808 ARM( "1: "ins" %1, [%2], #1\n" ) \
3809 THUMB( "1: "ins" %1, [%2]\n" ) \
3810@@ -311,6 +318,7 @@ union offset_union {
3811 " .popsection\n" \
3812 : "=r" (err), "=&r" (v), "=&r" (a) \
3813 : "0" (err), "1" (v), "2" (a)); \
3814+ pax_close_userland(); \
3815 if (err) \
3816 goto fault; \
3817 } while (0)
3818diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3819index 7abde2c..9df495f 100644
3820--- a/arch/arm/mm/cache-l2x0.c
3821+++ b/arch/arm/mm/cache-l2x0.c
3822@@ -46,7 +46,7 @@ struct l2x0_of_data {
3823 void (*setup)(const struct device_node *, u32 *, u32 *);
3824 void (*save)(void);
3825 struct outer_cache_fns outer_cache;
3826-};
3827+} __do_const;
3828
3829 static bool of_init = false;
3830
3831diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3832index 6eb97b3..ac509f6 100644
3833--- a/arch/arm/mm/context.c
3834+++ b/arch/arm/mm/context.c
3835@@ -43,7 +43,7 @@
3836 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3837
3838 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3839-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3840+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3841 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3842
3843 static DEFINE_PER_CPU(atomic64_t, active_asids);
3844@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3845 {
3846 static u32 cur_idx = 1;
3847 u64 asid = atomic64_read(&mm->context.id);
3848- u64 generation = atomic64_read(&asid_generation);
3849+ u64 generation = atomic64_read_unchecked(&asid_generation);
3850
3851 if (asid != 0 && is_reserved_asid(asid)) {
3852 /*
3853@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3854 */
3855 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3856 if (asid == NUM_USER_ASIDS) {
3857- generation = atomic64_add_return(ASID_FIRST_VERSION,
3858+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3859 &asid_generation);
3860 flush_context(cpu);
3861 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3862@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3863 cpu_set_reserved_ttbr0();
3864
3865 asid = atomic64_read(&mm->context.id);
3866- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3867+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3868 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3869 goto switch_mm_fastpath;
3870
3871 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3872 /* Check that our ASID belongs to the current generation. */
3873 asid = atomic64_read(&mm->context.id);
3874- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3875+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3876 asid = new_context(mm, cpu);
3877 atomic64_set(&mm->context.id, asid);
3878 }
3879diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3880index eb8830a..5360ce7 100644
3881--- a/arch/arm/mm/fault.c
3882+++ b/arch/arm/mm/fault.c
3883@@ -25,6 +25,7 @@
3884 #include <asm/system_misc.h>
3885 #include <asm/system_info.h>
3886 #include <asm/tlbflush.h>
3887+#include <asm/sections.h>
3888
3889 #include "fault.h"
3890
3891@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3892 if (fixup_exception(regs))
3893 return;
3894
3895+#ifdef CONFIG_PAX_MEMORY_UDEREF
3896+ if (addr < TASK_SIZE) {
3897+ if (current->signal->curr_ip)
3898+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3899+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3900+ else
3901+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3902+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3903+ }
3904+#endif
3905+
3906+#ifdef CONFIG_PAX_KERNEXEC
3907+ if ((fsr & FSR_WRITE) &&
3908+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3909+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3910+ {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3917+ }
3918+#endif
3919+
3920 /*
3921 * No handler, we'll have to terminate things with extreme prejudice.
3922 */
3923@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3924 }
3925 #endif
3926
3927+#ifdef CONFIG_PAX_PAGEEXEC
3928+ if (fsr & FSR_LNX_PF) {
3929+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3930+ do_group_exit(SIGKILL);
3931+ }
3932+#endif
3933+
3934 tsk->thread.address = addr;
3935 tsk->thread.error_code = fsr;
3936 tsk->thread.trap_no = 14;
3937@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3938 }
3939 #endif /* CONFIG_MMU */
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3943+{
3944+ long i;
3945+
3946+ printk(KERN_ERR "PAX: bytes at PC: ");
3947+ for (i = 0; i < 20; i++) {
3948+ unsigned char c;
3949+ if (get_user(c, (__force unsigned char __user *)pc+i))
3950+ printk(KERN_CONT "?? ");
3951+ else
3952+ printk(KERN_CONT "%02x ", c);
3953+ }
3954+ printk("\n");
3955+
3956+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3957+ for (i = -1; i < 20; i++) {
3958+ unsigned long c;
3959+ if (get_user(c, (__force unsigned long __user *)sp+i))
3960+ printk(KERN_CONT "???????? ");
3961+ else
3962+ printk(KERN_CONT "%08lx ", c);
3963+ }
3964+ printk("\n");
3965+}
3966+#endif
3967+
3968 /*
3969 * First Level Translation Fault Handler
3970 *
3971@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3972 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3973 struct siginfo info;
3974
3975+#ifdef CONFIG_PAX_MEMORY_UDEREF
3976+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3977+ if (current->signal->curr_ip)
3978+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3979+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3980+ else
3981+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3982+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3983+ goto die;
3984+ }
3985+#endif
3986+
3987 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3988 return;
3989
3990+die:
3991 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3992 inf->name, fsr, addr);
3993
3994@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3995 ifsr_info[nr].name = name;
3996 }
3997
3998+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3999+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4000+
4001 asmlinkage void __exception
4002 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4003 {
4004 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4005 struct siginfo info;
4006+ unsigned long pc = instruction_pointer(regs);
4007+
4008+ if (user_mode(regs)) {
4009+ unsigned long sigpage = current->mm->context.sigpage;
4010+
4011+ if (sigpage <= pc && pc < sigpage + 7*4) {
4012+ if (pc < sigpage + 3*4)
4013+ sys_sigreturn(regs);
4014+ else
4015+ sys_rt_sigreturn(regs);
4016+ return;
4017+ }
4018+ if (pc == 0xffff0f60UL) {
4019+ /*
4020+ * PaX: __kuser_cmpxchg64 emulation
4021+ */
4022+ // TODO
4023+ //regs->ARM_pc = regs->ARM_lr;
4024+ //return;
4025+ }
4026+ if (pc == 0xffff0fa0UL) {
4027+ /*
4028+ * PaX: __kuser_memory_barrier emulation
4029+ */
4030+ // dmb(); implied by the exception
4031+ regs->ARM_pc = regs->ARM_lr;
4032+ return;
4033+ }
4034+ if (pc == 0xffff0fc0UL) {
4035+ /*
4036+ * PaX: __kuser_cmpxchg emulation
4037+ */
4038+ // TODO
4039+ //long new;
4040+ //int op;
4041+
4042+ //op = FUTEX_OP_SET << 28;
4043+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4044+ //regs->ARM_r0 = old != new;
4045+ //regs->ARM_pc = regs->ARM_lr;
4046+ //return;
4047+ }
4048+ if (pc == 0xffff0fe0UL) {
4049+ /*
4050+ * PaX: __kuser_get_tls emulation
4051+ */
4052+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4053+ regs->ARM_pc = regs->ARM_lr;
4054+ return;
4055+ }
4056+ }
4057+
4058+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4059+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4060+ if (current->signal->curr_ip)
4061+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4062+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4063+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4064+ else
4065+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4066+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4067+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4068+ goto die;
4069+ }
4070+#endif
4071+
4072+#ifdef CONFIG_PAX_REFCOUNT
4073+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4074+ unsigned int bkpt;
4075+
4076+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4077+ current->thread.error_code = ifsr;
4078+ current->thread.trap_no = 0;
4079+ pax_report_refcount_overflow(regs);
4080+ fixup_exception(regs);
4081+ return;
4082+ }
4083+ }
4084+#endif
4085
4086 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4087 return;
4088
4089+die:
4090 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4091 inf->name, ifsr, addr);
4092
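
pax_report_insns() added above simply hex-dumps the bytes around the faulting PC (and the words around SP) so the PAGEEXEC report shows what was about to execute, with get_user() guarding every access since the address comes from a faulting context. The same idea in plain userspace C, where a direct read stands in for get_user():

#include <stdio.h>

static void report_insns(const unsigned char *pc)
{
        fputs("bytes at PC: ", stderr);
        for (int i = 0; i < 20; i++)
                fprintf(stderr, "%02x ", pc[i]);
        fputc('\n', stderr);
}

int main(void)
{
        report_insns((const unsigned char *)main);
        return 0;
}
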
4093diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4094index cf08bdf..772656c 100644
4095--- a/arch/arm/mm/fault.h
4096+++ b/arch/arm/mm/fault.h
4097@@ -3,6 +3,7 @@
4098
4099 /*
4100 * Fault status register encodings. We steal bit 31 for our own purposes.
4101+ * Set when the FSR value is from an instruction fault.
4102 */
4103 #define FSR_LNX_PF (1 << 31)
4104 #define FSR_WRITE (1 << 11)
4105@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4106 }
4107 #endif
4108
4109+/* valid for LPAE and !LPAE */
4110+static inline int is_xn_fault(unsigned int fsr)
4111+{
4112+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4113+}
4114+
4115+static inline int is_domain_fault(unsigned int fsr)
4116+{
4117+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4118+}
4119+
4120 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4121 unsigned long search_exception_table(unsigned long addr);
4122
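
The two helpers added to fault.h classify an ARM fault-status value by masking its FS field: execute-never (XN) faults match (fs & 0x3c) == 0xc and domain faults match (fs & 0xd) == 0x9. A standalone version with a deliberately simplified fsr_fs() that only extracts the low status bits (the real !LPAE helper also folds in FSR bit 10):

#include <stdio.h>

static unsigned int fsr_fs(unsigned int fsr)     { return fsr & 0x3f; }

static int is_xn_fault(unsigned int fsr)     { return (fsr_fs(fsr) & 0x3c) == 0xc; }
static int is_domain_fault(unsigned int fsr) { return (fsr_fs(fsr) & 0xd) == 0x9; }

int main(void)
{
        /* sample encodings: 0xd = section permission fault, 0x9 = section domain fault */
        printf("fs=0xd: xn=%d domain=%d\n", is_xn_fault(0xd), is_domain_fault(0xd));
        printf("fs=0x9: xn=%d domain=%d\n", is_xn_fault(0x9), is_domain_fault(0x9));
        return 0;
}
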
4123diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4124index 804d615..fcec50a 100644
4125--- a/arch/arm/mm/init.c
4126+++ b/arch/arm/mm/init.c
4127@@ -30,6 +30,8 @@
4128 #include <asm/setup.h>
4129 #include <asm/tlb.h>
4130 #include <asm/fixmap.h>
4131+#include <asm/system_info.h>
4132+#include <asm/cp15.h>
4133
4134 #include <asm/mach/arch.h>
4135 #include <asm/mach/map.h>
4136@@ -625,7 +627,46 @@ void free_initmem(void)
4137 {
4138 #ifdef CONFIG_HAVE_TCM
4139 extern char __tcm_start, __tcm_end;
4140+#endif
4141
4142+#ifdef CONFIG_PAX_KERNEXEC
4143+ unsigned long addr;
4144+ pgd_t *pgd;
4145+ pud_t *pud;
4146+ pmd_t *pmd;
4147+ int cpu_arch = cpu_architecture();
4148+ unsigned int cr = get_cr();
4149+
4150+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4151+ /* make page tables, etc. before .text NX */
4152+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4153+ pgd = pgd_offset_k(addr);
4154+ pud = pud_offset(pgd, addr);
4155+ pmd = pmd_offset(pud, addr);
4156+ __section_update(pmd, addr, PMD_SECT_XN);
4157+ }
4158+ /* make init NX */
4159+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4160+ pgd = pgd_offset_k(addr);
4161+ pud = pud_offset(pgd, addr);
4162+ pmd = pmd_offset(pud, addr);
4163+ __section_update(pmd, addr, PMD_SECT_XN);
4164+ }
4165+ /* make kernel code/rodata RX */
4166+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4167+ pgd = pgd_offset_k(addr);
4168+ pud = pud_offset(pgd, addr);
4169+ pmd = pmd_offset(pud, addr);
4170+#ifdef CONFIG_ARM_LPAE
4171+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4172+#else
4173+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4174+#endif
4175+ }
4176+ }
4177+#endif
4178+
4179+#ifdef CONFIG_HAVE_TCM
4180 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4181 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4182 #endif
4183diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4184index f9c32ba..8540068 100644
4185--- a/arch/arm/mm/ioremap.c
4186+++ b/arch/arm/mm/ioremap.c
4187@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4188 unsigned int mtype;
4189
4190 if (cached)
4191- mtype = MT_MEMORY_RWX;
4192+ mtype = MT_MEMORY_RX;
4193 else
4194- mtype = MT_MEMORY_RWX_NONCACHED;
4195+ mtype = MT_MEMORY_RX_NONCACHED;
4196
4197 return __arm_ioremap_caller(phys_addr, size, mtype,
4198 __builtin_return_address(0));
4199diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4200index 5e85ed3..b10a7ed 100644
4201--- a/arch/arm/mm/mmap.c
4202+++ b/arch/arm/mm/mmap.c
4203@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4204 struct vm_area_struct *vma;
4205 int do_align = 0;
4206 int aliasing = cache_is_vipt_aliasing();
4207+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4208 struct vm_unmapped_area_info info;
4209
4210 /*
4211@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4212 if (len > TASK_SIZE)
4213 return -ENOMEM;
4214
4215+#ifdef CONFIG_PAX_RANDMMAP
4216+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4217+#endif
4218+
4219 if (addr) {
4220 if (do_align)
4221 addr = COLOUR_ALIGN(addr, pgoff);
4222@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 addr = PAGE_ALIGN(addr);
4224
4225 vma = find_vma(mm, addr);
4226- if (TASK_SIZE - len >= addr &&
4227- (!vma || addr + len <= vma->vm_start))
4228+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4229 return addr;
4230 }
4231
4232@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4233 info.high_limit = TASK_SIZE;
4234 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4235 info.align_offset = pgoff << PAGE_SHIFT;
4236+ info.threadstack_offset = offset;
4237 return vm_unmapped_area(&info);
4238 }
4239
4240@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4241 unsigned long addr = addr0;
4242 int do_align = 0;
4243 int aliasing = cache_is_vipt_aliasing();
4244+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4245 struct vm_unmapped_area_info info;
4246
4247 /*
4248@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4249 return addr;
4250 }
4251
4252+#ifdef CONFIG_PAX_RANDMMAP
4253+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4254+#endif
4255+
4256 /* requesting a specific address */
4257 if (addr) {
4258 if (do_align)
4259@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 else
4261 addr = PAGE_ALIGN(addr);
4262 vma = find_vma(mm, addr);
4263- if (TASK_SIZE - len >= addr &&
4264- (!vma || addr + len <= vma->vm_start))
4265+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4266 return addr;
4267 }
4268
4269@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4270 info.high_limit = mm->mmap_base;
4271 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4272 info.align_offset = pgoff << PAGE_SHIFT;
4273+ info.threadstack_offset = offset;
4274 addr = vm_unmapped_area(&info);
4275
4276 /*
4277@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4278 {
4279 unsigned long random_factor = 0UL;
4280
4281+#ifdef CONFIG_PAX_RANDMMAP
4282+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4283+#endif
4284+
4285 /* 8 bits of randomness in 20 address space bits */
4286 if ((current->flags & PF_RANDOMIZE) &&
4287 !(current->personality & ADDR_NO_RANDOMIZE))
4288@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289
4290 if (mmap_is_legacy()) {
4291 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4292+
4293+#ifdef CONFIG_PAX_RANDMMAP
4294+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4295+ mm->mmap_base += mm->delta_mmap;
4296+#endif
4297+
4298 mm->get_unmapped_area = arch_get_unmapped_area;
4299 } else {
4300 mm->mmap_base = mmap_base(random_factor);
4301+
4302+#ifdef CONFIG_PAX_RANDMMAP
4303+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4304+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4305+#endif
4306+
4307 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4308 }
4309 }
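
The mmap.c changes keep the stock ARM randomization — per the comment above, "8 bits of randomness in 20 address space bits", i.e. an 8-bit random page count shifted by PAGE_SHIFT, giving a 1 MiB window — and additionally displace mmap_base by the PaX deltas when MF_PAX_RANDMMAP is set. A sketch of just that entropy math, with an illustrative base address and rand() standing in for the kernel's RNG:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long base = 0x40000000UL;      /* illustrative TASK_UNMAPPED_BASE */
        unsigned long random_factor =
                ((unsigned long)(rand() & 0xff)) << PAGE_SHIFT;

        /* offsets range over 0 .. 0xff000, i.e. within 2^20 bytes */
        printf("mmap base: %#lx (+%#lx)\n", base + random_factor, random_factor);
        return 0;
}
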
4310diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4311index b68c6b2..f66c492 100644
4312--- a/arch/arm/mm/mmu.c
4313+++ b/arch/arm/mm/mmu.c
4314@@ -39,6 +39,22 @@
4315 #include "mm.h"
4316 #include "tcm.h"
4317
4318+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4319+void modify_domain(unsigned int dom, unsigned int type)
4320+{
4321+ struct thread_info *thread = current_thread_info();
4322+ unsigned int domain = thread->cpu_domain;
4323+ /*
4324+ * DOMAIN_MANAGER might be defined to some other value,
4325+ * use the arch-defined constant
4326+ */
4327+ domain &= ~domain_val(dom, 3);
4328+ thread->cpu_domain = domain | domain_val(dom, type);
4329+ set_domain(thread->cpu_domain);
4330+}
4331+EXPORT_SYMBOL(modify_domain);
4332+#endif
4333+
4334 /*
4335 * empty_zero_page is a special page that is used for
4336 * zero-initialized data and COW.
4337@@ -235,7 +251,15 @@ __setup("noalign", noalign_setup);
4338 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4339 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4340
4341-static struct mem_type mem_types[] = {
4342+#ifdef CONFIG_PAX_KERNEXEC
4343+#define L_PTE_KERNEXEC L_PTE_RDONLY
4344+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4345+#else
4346+#define L_PTE_KERNEXEC L_PTE_DIRTY
4347+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4348+#endif
4349+
4350+static struct mem_type mem_types[] __read_only = {
4351 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4352 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4353 L_PTE_SHARED,
4354@@ -264,19 +288,19 @@ static struct mem_type mem_types[] = {
4355 .prot_sect = PROT_SECT_DEVICE,
4356 .domain = DOMAIN_IO,
4357 },
4358- [MT_UNCACHED] = {
4359+ [MT_UNCACHED_RW] = {
4360 .prot_pte = PROT_PTE_DEVICE,
4361 .prot_l1 = PMD_TYPE_TABLE,
4362 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4363 .domain = DOMAIN_IO,
4364 },
4365- [MT_CACHECLEAN] = {
4366- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4367+ [MT_CACHECLEAN_RO] = {
4368+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4369 .domain = DOMAIN_KERNEL,
4370 },
4371 #ifndef CONFIG_ARM_LPAE
4372- [MT_MINICLEAN] = {
4373- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4374+ [MT_MINICLEAN_RO] = {
4375+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4376 .domain = DOMAIN_KERNEL,
4377 },
4378 #endif
4379@@ -284,15 +308,15 @@ static struct mem_type mem_types[] = {
4380 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4381 L_PTE_RDONLY,
4382 .prot_l1 = PMD_TYPE_TABLE,
4383- .domain = DOMAIN_USER,
4384+ .domain = DOMAIN_VECTORS,
4385 },
4386 [MT_HIGH_VECTORS] = {
4387 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4388 L_PTE_USER | L_PTE_RDONLY,
4389 .prot_l1 = PMD_TYPE_TABLE,
4390- .domain = DOMAIN_USER,
4391+ .domain = DOMAIN_VECTORS,
4392 },
4393- [MT_MEMORY_RWX] = {
4394+ [__MT_MEMORY_RWX] = {
4395 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4396 .prot_l1 = PMD_TYPE_TABLE,
4397 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4398@@ -305,17 +329,30 @@ static struct mem_type mem_types[] = {
4399 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4400 .domain = DOMAIN_KERNEL,
4401 },
4402- [MT_ROM] = {
4403- .prot_sect = PMD_TYPE_SECT,
4404+ [MT_MEMORY_RX] = {
4405+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4406+ .prot_l1 = PMD_TYPE_TABLE,
4407+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4408+ .domain = DOMAIN_KERNEL,
4409+ },
4410+ [MT_ROM_RX] = {
4411+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4412 .domain = DOMAIN_KERNEL,
4413 },
4414- [MT_MEMORY_RWX_NONCACHED] = {
4415+ [MT_MEMORY_RW_NONCACHED] = {
4416 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4417 L_PTE_MT_BUFFERABLE,
4418 .prot_l1 = PMD_TYPE_TABLE,
4419 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4420 .domain = DOMAIN_KERNEL,
4421 },
4422+ [MT_MEMORY_RX_NONCACHED] = {
4423+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4424+ L_PTE_MT_BUFFERABLE,
4425+ .prot_l1 = PMD_TYPE_TABLE,
4426+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4427+ .domain = DOMAIN_KERNEL,
4428+ },
4429 [MT_MEMORY_RW_DTCM] = {
4430 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4431 L_PTE_XN,
4432@@ -323,9 +360,10 @@ static struct mem_type mem_types[] = {
4433 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4434 .domain = DOMAIN_KERNEL,
4435 },
4436- [MT_MEMORY_RWX_ITCM] = {
4437- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4438+ [MT_MEMORY_RX_ITCM] = {
4439+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4440 .prot_l1 = PMD_TYPE_TABLE,
4441+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4442 .domain = DOMAIN_KERNEL,
4443 },
4444 [MT_MEMORY_RW_SO] = {
4445@@ -534,9 +572,14 @@ static void __init build_mem_type_table(void)
4446 * Mark cache clean areas and XIP ROM read only
4447 * from SVC mode and no access from userspace.
4448 */
4449- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4450- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4451- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4452+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4453+#ifdef CONFIG_PAX_KERNEXEC
4454+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4455+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4456+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4457+#endif
4458+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4459+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4460 #endif
4461
4462 if (is_smp()) {
4463@@ -552,13 +595,17 @@ static void __init build_mem_type_table(void)
4464 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4465 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4466 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4467- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4468- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4469+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4470+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4471 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4472 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4473+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4474+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4475 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4476- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4477- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4478+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4479+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4480+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4481+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4482 }
4483 }
4484
4485@@ -569,15 +616,20 @@ static void __init build_mem_type_table(void)
4486 if (cpu_arch >= CPU_ARCH_ARMv6) {
4487 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4488 /* Non-cacheable Normal is XCB = 001 */
4489- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4491+ PMD_SECT_BUFFERED;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4493 PMD_SECT_BUFFERED;
4494 } else {
4495 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4496- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4497+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4498+ PMD_SECT_TEX(1);
4499+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4500 PMD_SECT_TEX(1);
4501 }
4502 } else {
4503- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4504+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4506 }
4507
4508 #ifdef CONFIG_ARM_LPAE
4509@@ -593,6 +645,8 @@ static void __init build_mem_type_table(void)
4510 vecs_pgprot |= PTE_EXT_AF;
4511 #endif
4512
4513+ user_pgprot |= __supported_pte_mask;
4514+
4515 for (i = 0; i < 16; i++) {
4516 pteval_t v = pgprot_val(protection_map[i]);
4517 protection_map[i] = __pgprot(v | user_pgprot);
4518@@ -610,21 +664,24 @@ static void __init build_mem_type_table(void)
4519
4520 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4521 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4522- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4523- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4524+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4525+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4526 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4527 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4528+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4529+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4530 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4531- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4532- mem_types[MT_ROM].prot_sect |= cp->pmd;
4533+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4534+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4535+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4536
4537 switch (cp->pmd) {
4538 case PMD_SECT_WT:
4539- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4540+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4541 break;
4542 case PMD_SECT_WB:
4543 case PMD_SECT_WBWA:
4544- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4545+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4546 break;
4547 }
4548 pr_info("Memory policy: %sData cache %s\n",
4549@@ -842,7 +899,7 @@ static void __init create_mapping(struct map_desc *md)
4550 return;
4551 }
4552
4553- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4554+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4555 md->virtual >= PAGE_OFFSET &&
4556 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4557 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
4558@@ -1257,18 +1314,15 @@ void __init arm_mm_memblock_reserve(void)
4559 * called function. This means you can't use any function or debugging
4560 * method which may touch any device, otherwise the kernel _will_ crash.
4561 */
4562+
4563+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4564+
4565 static void __init devicemaps_init(const struct machine_desc *mdesc)
4566 {
4567 struct map_desc map;
4568 unsigned long addr;
4569- void *vectors;
4570
4571- /*
4572- * Allocate the vector page early.
4573- */
4574- vectors = early_alloc(PAGE_SIZE * 2);
4575-
4576- early_trap_init(vectors);
4577+ early_trap_init(&vectors);
4578
4579 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4580 pmd_clear(pmd_off_k(addr));
4581@@ -1281,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4582 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4583 map.virtual = MODULES_VADDR;
4584 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4585- map.type = MT_ROM;
4586+ map.type = MT_ROM_RX;
4587 create_mapping(&map);
4588 #endif
4589
4590@@ -1292,14 +1346,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4591 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4592 map.virtual = FLUSH_BASE;
4593 map.length = SZ_1M;
4594- map.type = MT_CACHECLEAN;
4595+ map.type = MT_CACHECLEAN_RO;
4596 create_mapping(&map);
4597 #endif
4598 #ifdef FLUSH_BASE_MINICACHE
4599 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4600 map.virtual = FLUSH_BASE_MINICACHE;
4601 map.length = SZ_1M;
4602- map.type = MT_MINICLEAN;
4603+ map.type = MT_MINICLEAN_RO;
4604 create_mapping(&map);
4605 #endif
4606
4607@@ -1308,7 +1362,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4608 * location (0xffff0000). If we aren't using high-vectors, also
4609 * create a mapping at the low-vectors virtual address.
4610 */
4611- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4612+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4613 map.virtual = 0xffff0000;
4614 map.length = PAGE_SIZE;
4615 #ifdef CONFIG_KUSER_HELPERS
4616@@ -1365,8 +1419,10 @@ static void __init kmap_init(void)
4617 static void __init map_lowmem(void)
4618 {
4619 struct memblock_region *reg;
4620+#ifndef CONFIG_PAX_KERNEXEC
4621 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4622 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4623+#endif
4624
4625 /* Map all the lowmem memory banks. */
4626 for_each_memblock(memory, reg) {
4627@@ -1379,11 +1435,48 @@ static void __init map_lowmem(void)
4628 if (start >= end)
4629 break;
4630
4631+#ifdef CONFIG_PAX_KERNEXEC
4632+ map.pfn = __phys_to_pfn(start);
4633+ map.virtual = __phys_to_virt(start);
4634+ map.length = end - start;
4635+
4636+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4637+ struct map_desc kernel;
4638+ struct map_desc initmap;
4639+
4640+ /* when freeing initmem we will make this RW */
4641+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4642+ initmap.virtual = (unsigned long)__init_begin;
4643+ initmap.length = _sdata - __init_begin;
4644+ initmap.type = __MT_MEMORY_RWX;
4645+ create_mapping(&initmap);
4646+
4647+ /* when freeing initmem we will make this RX */
4648+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4649+ kernel.virtual = (unsigned long)_stext;
4650+ kernel.length = __init_begin - _stext;
4651+ kernel.type = __MT_MEMORY_RWX;
4652+ create_mapping(&kernel);
4653+
4654+ if (map.virtual < (unsigned long)_stext) {
4655+ map.length = (unsigned long)_stext - map.virtual;
4656+ map.type = __MT_MEMORY_RWX;
4657+ create_mapping(&map);
4658+ }
4659+
4660+ map.pfn = __phys_to_pfn(__pa(_sdata));
4661+ map.virtual = (unsigned long)_sdata;
4662+ map.length = end - __pa(_sdata);
4663+ }
4664+
4665+ map.type = MT_MEMORY_RW;
4666+ create_mapping(&map);
4667+#else
4668 if (end < kernel_x_start || start >= kernel_x_end) {
4669 map.pfn = __phys_to_pfn(start);
4670 map.virtual = __phys_to_virt(start);
4671 map.length = end - start;
4672- map.type = MT_MEMORY_RWX;
4673+ map.type = __MT_MEMORY_RWX;
4674
4675 create_mapping(&map);
4676 } else {
4677@@ -1400,7 +1493,7 @@ static void __init map_lowmem(void)
4678 map.pfn = __phys_to_pfn(kernel_x_start);
4679 map.virtual = __phys_to_virt(kernel_x_start);
4680 map.length = kernel_x_end - kernel_x_start;
4681- map.type = MT_MEMORY_RWX;
4682+ map.type = __MT_MEMORY_RWX;
4683
4684 create_mapping(&map);
4685
4686@@ -1413,6 +1506,7 @@ static void __init map_lowmem(void)
4687 create_mapping(&map);
4688 }
4689 }
4690+#endif
4691 }
4692 }
4693
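
Taken together, the mmu.c changes split every formerly writable-and-executable memory type into an RW (execute-never) or RX (read-only) variant, enforcing W^X on kernel section mappings; __MT_MEMORY_RWX survives only as a transitional type for the kernel image, which free_initmem() later tightens to RX/RW. A conceptual model of the resulting invariant, with simplified stand-in flags rather than the real PMD/PTE bits:

#include <stdio.h>

#define P_R 1
#define P_W 2
#define P_X 4

struct mem_type { const char *name; int prot; };

static const struct mem_type mem_types[] = {
        { "MT_MEMORY_RW",           P_R | P_W },  /* data: XN set    */
        { "MT_MEMORY_RX",           P_R | P_X },  /* text: read-only */
        { "MT_MEMORY_RW_NONCACHED", P_R | P_W },
        { "MT_MEMORY_RX_NONCACHED", P_R | P_X },
};

int main(void)
{
        for (unsigned int i = 0; i < sizeof(mem_types) / sizeof(*mem_types); i++)
                printf("%-24s W=%d X=%d\n", mem_types[i].name,
                       !!(mem_types[i].prot & P_W),
                       !!(mem_types[i].prot & P_X));
        return 0;                   /* no entry has both W and X */
}
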
4694diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4695index 5b217f4..c23f40e 100644
4696--- a/arch/arm/plat-iop/setup.c
4697+++ b/arch/arm/plat-iop/setup.c
4698@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4699 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4700 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4701 .length = IOP3XX_PERIPHERAL_SIZE,
4702- .type = MT_UNCACHED,
4703+ .type = MT_UNCACHED_RW,
4704 },
4705 };
4706
4707diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4708index a5bc92d..0bb4730 100644
4709--- a/arch/arm/plat-omap/sram.c
4710+++ b/arch/arm/plat-omap/sram.c
4711@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4712 * Looks like we need to preserve some bootloader code at the
4713 * beginning of SRAM for jumping to flash for reboot to work...
4714 */
4715+ pax_open_kernel();
4716 memset_io(omap_sram_base + omap_sram_skip, 0,
4717 omap_sram_size - omap_sram_skip);
4718+ pax_close_kernel();
4719 }
4720diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4721index ce6d763..cfea917 100644
4722--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4723+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4724@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4725 int (*started)(unsigned ch);
4726 int (*flush)(unsigned ch);
4727 int (*stop)(unsigned ch);
4728-};
4729+} __no_const;
4730
4731 extern void *samsung_dmadev_get_ops(void);
4732 extern void *s3c_dma_get_ops(void);
4733diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4734index 409ca37..10c87ad 100644
4735--- a/arch/arm64/include/asm/barrier.h
4736+++ b/arch/arm64/include/asm/barrier.h
4737@@ -40,7 +40,7 @@
4738 do { \
4739 compiletime_assert_atomic_type(*p); \
4740 smp_mb(); \
4741- ACCESS_ONCE(*p) = (v); \
4742+ ACCESS_ONCE_RW(*p) = (v); \
4743 } while (0)
4744
4745 #define smp_load_acquire(p) \
4746diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4747index 6c0f684..5faea9d 100644
4748--- a/arch/arm64/include/asm/uaccess.h
4749+++ b/arch/arm64/include/asm/uaccess.h
4750@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4751 flag; \
4752 })
4753
4754+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4755 #define access_ok(type, addr, size) __range_ok(addr, size)
4756 #define user_addr_max get_fs
4757
4758diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4759index c3a58a1..78fbf54 100644
4760--- a/arch/avr32/include/asm/cache.h
4761+++ b/arch/avr32/include/asm/cache.h
4762@@ -1,8 +1,10 @@
4763 #ifndef __ASM_AVR32_CACHE_H
4764 #define __ASM_AVR32_CACHE_H
4765
4766+#include <linux/const.h>
4767+
4768 #define L1_CACHE_SHIFT 5
4769-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4770+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4771
4772 /*
4773 * Memory returned by kmalloc() may be used for DMA, so we must make
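
This cache.h pattern recurs across avr32, blackfin, cris, frv, hexagon and ia64 below: L1_CACHE_BYTES becomes _AC(1,UL) << L1_CACHE_SHIFT, making the constant unsigned long in C (so size arithmetic never truncates to int) while remaining usable from assembly. The _AC() helper from <linux/const.h>, reproduced standalone:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X               /* bare number for .S files */
#else
#define __AC(X, Y)      (X##Y)          /* paste the UL suffix in C */
#define _AC(X, Y)       __AC(X, Y)
#endif

#define L1_CACHE_SHIFT  5
#define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
        printf("sizeof=%zu value=%lu\n", sizeof(L1_CACHE_BYTES), L1_CACHE_BYTES);
        return 0;
}
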
4774diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4775index d232888..87c8df1 100644
4776--- a/arch/avr32/include/asm/elf.h
4777+++ b/arch/avr32/include/asm/elf.h
4778@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4779 the loader. We need to make sure that it is out of the way of the program
4780 that it will "exec", and that there is sufficient room for the brk. */
4781
4782-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4783+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4784
4785+#ifdef CONFIG_PAX_ASLR
4786+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4787+
4788+#define PAX_DELTA_MMAP_LEN 15
4789+#define PAX_DELTA_STACK_LEN 15
4790+#endif
4791
4792 /* This yields a mask that user programs can use to figure out what
4793 instruction set this CPU supports. This could be done in user space,
4794diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4795index 479330b..53717a8 100644
4796--- a/arch/avr32/include/asm/kmap_types.h
4797+++ b/arch/avr32/include/asm/kmap_types.h
4798@@ -2,9 +2,9 @@
4799 #define __ASM_AVR32_KMAP_TYPES_H
4800
4801 #ifdef CONFIG_DEBUG_HIGHMEM
4802-# define KM_TYPE_NR 29
4803+# define KM_TYPE_NR 30
4804 #else
4805-# define KM_TYPE_NR 14
4806+# define KM_TYPE_NR 15
4807 #endif
4808
4809 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4810diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4811index 0eca933..eb78c7b 100644
4812--- a/arch/avr32/mm/fault.c
4813+++ b/arch/avr32/mm/fault.c
4814@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4815
4816 int exception_trace = 1;
4817
4818+#ifdef CONFIG_PAX_PAGEEXEC
4819+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4820+{
4821+ unsigned long i;
4822+
4823+ printk(KERN_ERR "PAX: bytes at PC: ");
4824+ for (i = 0; i < 20; i++) {
4825+ unsigned char c;
4826+ if (get_user(c, (unsigned char *)pc+i))
4827+ printk(KERN_CONT "?? ");
4828+ else
4829+ printk(KERN_CONT "%02x ", c);
4830+ }
4831+ printk("\n");
4832+}
4833+#endif
4834+
4835 /*
4836 * This routine handles page faults. It determines the address and the
4837 * problem, and then passes it off to one of the appropriate routines.
4838@@ -176,6 +193,16 @@ bad_area:
4839 up_read(&mm->mmap_sem);
4840
4841 if (user_mode(regs)) {
4842+
4843+#ifdef CONFIG_PAX_PAGEEXEC
4844+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4845+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4846+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4847+ do_group_exit(SIGKILL);
4848+ }
4849+ }
4850+#endif
4851+
4852 if (exception_trace && printk_ratelimit())
4853 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4854 "sp %08lx ecr %lu\n",
4855diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4856index 568885a..f8008df 100644
4857--- a/arch/blackfin/include/asm/cache.h
4858+++ b/arch/blackfin/include/asm/cache.h
4859@@ -7,6 +7,7 @@
4860 #ifndef __ARCH_BLACKFIN_CACHE_H
4861 #define __ARCH_BLACKFIN_CACHE_H
4862
4863+#include <linux/const.h>
4864 #include <linux/linkage.h> /* for asmlinkage */
4865
4866 /*
4867@@ -14,7 +15,7 @@
4868 * Blackfin loads 32 bytes for cache
4869 */
4870 #define L1_CACHE_SHIFT 5
4871-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4873 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4874
4875 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4876diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4877index aea2718..3639a60 100644
4878--- a/arch/cris/include/arch-v10/arch/cache.h
4879+++ b/arch/cris/include/arch-v10/arch/cache.h
4880@@ -1,8 +1,9 @@
4881 #ifndef _ASM_ARCH_CACHE_H
4882 #define _ASM_ARCH_CACHE_H
4883
4884+#include <linux/const.h>
4885 /* Etrax 100LX have 32-byte cache-lines. */
4886-#define L1_CACHE_BYTES 32
4887 #define L1_CACHE_SHIFT 5
4888+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4889
4890 #endif /* _ASM_ARCH_CACHE_H */
4891diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4892index 7caf25d..ee65ac5 100644
4893--- a/arch/cris/include/arch-v32/arch/cache.h
4894+++ b/arch/cris/include/arch-v32/arch/cache.h
4895@@ -1,11 +1,12 @@
4896 #ifndef _ASM_CRIS_ARCH_CACHE_H
4897 #define _ASM_CRIS_ARCH_CACHE_H
4898
4899+#include <linux/const.h>
4900 #include <arch/hwregs/dma.h>
4901
4902 /* A cache-line is 32 bytes. */
4903-#define L1_CACHE_BYTES 32
4904 #define L1_CACHE_SHIFT 5
4905+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4906
4907 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4908
4909diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4910index b86329d..6709906 100644
4911--- a/arch/frv/include/asm/atomic.h
4912+++ b/arch/frv/include/asm/atomic.h
4913@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4914 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4915 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4916
4917+#define atomic64_read_unchecked(v) atomic64_read(v)
4918+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4919+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4920+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4921+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4922+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4923+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4924+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4925+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4926+
4927 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4928 {
4929 int c, old;
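
The frv (and, below, ia64) atomic64_*_unchecked aliases exist because PAX_REFCOUNT makes the ordinary atomic ops trap on overflow on arches that implement it, while the _unchecked variants are reserved for counters that may legitimately wrap; arches without the instrumentation simply alias both to the same operation, as these #defines do. A simplified, non-atomic model of the distinction:

#include <limits.h>
#include <stdio.h>

static int add_checked(int *v, int a)
{
        if (*v > INT_MAX - a) {                 /* would overflow: refuse */
                fputs("refcount overflow detected\n", stderr);
                return *v;
        }
        return *v += a;
}

static unsigned int add_unchecked(unsigned int *v, unsigned int a)
{
        return *v += a;                         /* wrapping is intended   */
}

int main(void)
{
        int refs = INT_MAX;
        unsigned int stats = UINT_MAX;

        add_checked(&refs, 1);                  /* saturates, reports     */
        add_unchecked(&stats, 1);               /* wraps to 0, quietly    */
        printf("refs=%d stats=%u\n", refs, stats);
        return 0;
}
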
4930diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4931index 2797163..c2a401d 100644
4932--- a/arch/frv/include/asm/cache.h
4933+++ b/arch/frv/include/asm/cache.h
4934@@ -12,10 +12,11 @@
4935 #ifndef __ASM_CACHE_H
4936 #define __ASM_CACHE_H
4937
4938+#include <linux/const.h>
4939
4940 /* bytes per L1 cache line */
4941 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4942-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4943+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4944
4945 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4946 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4947diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4948index 43901f2..0d8b865 100644
4949--- a/arch/frv/include/asm/kmap_types.h
4950+++ b/arch/frv/include/asm/kmap_types.h
4951@@ -2,6 +2,6 @@
4952 #ifndef _ASM_KMAP_TYPES_H
4953 #define _ASM_KMAP_TYPES_H
4954
4955-#define KM_TYPE_NR 17
4956+#define KM_TYPE_NR 18
4957
4958 #endif
4959diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4960index 836f147..4cf23f5 100644
4961--- a/arch/frv/mm/elf-fdpic.c
4962+++ b/arch/frv/mm/elf-fdpic.c
4963@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4964 {
4965 struct vm_area_struct *vma;
4966 struct vm_unmapped_area_info info;
4967+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4968
4969 if (len > TASK_SIZE)
4970 return -ENOMEM;
4971@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4972 if (addr) {
4973 addr = PAGE_ALIGN(addr);
4974 vma = find_vma(current->mm, addr);
4975- if (TASK_SIZE - len >= addr &&
4976- (!vma || addr + len <= vma->vm_start))
4977+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4978 goto success;
4979 }
4980
4981@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4982 info.high_limit = (current->mm->start_stack - 0x00200000);
4983 info.align_mask = 0;
4984 info.align_offset = 0;
4985+ info.threadstack_offset = offset;
4986 addr = vm_unmapped_area(&info);
4987 if (!(addr & ~PAGE_MASK))
4988 goto success;
4989diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4990index f4ca594..adc72fd6 100644
4991--- a/arch/hexagon/include/asm/cache.h
4992+++ b/arch/hexagon/include/asm/cache.h
4993@@ -21,9 +21,11 @@
4994 #ifndef __ASM_CACHE_H
4995 #define __ASM_CACHE_H
4996
4997+#include <linux/const.h>
4998+
4999 /* Bytes per L1 cache line */
5000-#define L1_CACHE_SHIFT (5)
5001-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5002+#define L1_CACHE_SHIFT 5
5003+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5004
5005 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5006 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5007diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5008index 0c8e553..112d734 100644
5009--- a/arch/ia64/Kconfig
5010+++ b/arch/ia64/Kconfig
5011@@ -544,6 +544,7 @@ source "drivers/sn/Kconfig"
5012 config KEXEC
5013 bool "kexec system call"
5014 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5015+ depends on !GRKERNSEC_KMEM
5016 help
5017 kexec is a system call that implements the ability to shutdown your
5018 current kernel, and to start another kernel. It is like a reboot
5019diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5020index 6e6fe18..a6ae668 100644
5021--- a/arch/ia64/include/asm/atomic.h
5022+++ b/arch/ia64/include/asm/atomic.h
5023@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5024 #define atomic64_inc(v) atomic64_add(1, (v))
5025 #define atomic64_dec(v) atomic64_sub(1, (v))
5026
5027+#define atomic64_read_unchecked(v) atomic64_read(v)
5028+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5029+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5030+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5031+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5032+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5033+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5034+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5035+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5036+
5037 /* Atomic operations are already serializing */
5038 #define smp_mb__before_atomic_dec() barrier()
5039 #define smp_mb__after_atomic_dec() barrier()
5040diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5041index d0a69aa..142f878 100644
5042--- a/arch/ia64/include/asm/barrier.h
5043+++ b/arch/ia64/include/asm/barrier.h
5044@@ -64,7 +64,7 @@
5045 do { \
5046 compiletime_assert_atomic_type(*p); \
5047 barrier(); \
5048- ACCESS_ONCE(*p) = (v); \
5049+ ACCESS_ONCE_RW(*p) = (v); \
5050 } while (0)
5051
5052 #define smp_load_acquire(p) \
5053diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5054index 988254a..e1ee885 100644
5055--- a/arch/ia64/include/asm/cache.h
5056+++ b/arch/ia64/include/asm/cache.h
5057@@ -1,6 +1,7 @@
5058 #ifndef _ASM_IA64_CACHE_H
5059 #define _ASM_IA64_CACHE_H
5060
5061+#include <linux/const.h>
5062
5063 /*
5064 * Copyright (C) 1998-2000 Hewlett-Packard Co
5065@@ -9,7 +10,7 @@
5066
5067 /* Bytes per L1 (data) cache line. */
5068 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5069-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5070+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5071
5072 #ifdef CONFIG_SMP
5073 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5074diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5075index 5a83c5c..4d7f553 100644
5076--- a/arch/ia64/include/asm/elf.h
5077+++ b/arch/ia64/include/asm/elf.h
5078@@ -42,6 +42,13 @@
5079 */
5080 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5081
5082+#ifdef CONFIG_PAX_ASLR
5083+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5084+
5085+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5086+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5087+#endif
5088+
5089 #define PT_IA_64_UNWIND 0x70000001
5090
5091 /* IA-64 relocations: */
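
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts, not byte offsets: they state how many bits of randomness PAX_ASLR may fold into the mmap and stack bases on this architecture. The fs/binfmt_elf.c part of the patch consumes them at execve time roughly like this (a sketch, page-aligned as in the real consumer):

        current->mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
        current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
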
5092diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5093index 5767cdf..7462574 100644
5094--- a/arch/ia64/include/asm/pgalloc.h
5095+++ b/arch/ia64/include/asm/pgalloc.h
5096@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5097 pgd_val(*pgd_entry) = __pa(pud);
5098 }
5099
5100+static inline void
5101+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5102+{
5103+ pgd_populate(mm, pgd_entry, pud);
5104+}
5105+
5106 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5107 {
5108 return quicklist_alloc(0, GFP_KERNEL, NULL);
5109@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5110 pud_val(*pud_entry) = __pa(pmd);
5111 }
5112
5113+static inline void
5114+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5115+{
5116+ pud_populate(mm, pud_entry, pmd);
5117+}
5118+
5119 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5120 {
5121 return quicklist_alloc(0, GFP_KERNEL, NULL);
5122diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5123index 7935115..c0eca6a 100644
5124--- a/arch/ia64/include/asm/pgtable.h
5125+++ b/arch/ia64/include/asm/pgtable.h
5126@@ -12,7 +12,7 @@
5127 * David Mosberger-Tang <davidm@hpl.hp.com>
5128 */
5129
5130-
5131+#include <linux/const.h>
5132 #include <asm/mman.h>
5133 #include <asm/page.h>
5134 #include <asm/processor.h>
5135@@ -142,6 +142,17 @@
5136 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5137 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5138 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5139+
5140+#ifdef CONFIG_PAX_PAGEEXEC
5141+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5142+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5143+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5144+#else
5145+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5146+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5147+# define PAGE_COPY_NOEXEC PAGE_COPY
5148+#endif
5149+
5150 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5151 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5152 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5153diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5154index 45698cd..e8e2dbc 100644
5155--- a/arch/ia64/include/asm/spinlock.h
5156+++ b/arch/ia64/include/asm/spinlock.h
5157@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5158 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5159
5160 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5161- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5162+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5163 }
5164
5165 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5166diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5167index 449c8c0..3d4b1e9 100644
5168--- a/arch/ia64/include/asm/uaccess.h
5169+++ b/arch/ia64/include/asm/uaccess.h
5170@@ -70,6 +70,7 @@
5171 && ((segment).seg == KERNEL_DS.seg \
5172 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5173 })
5174+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5175 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5176
5177 /*
5178@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5179 static inline unsigned long
5180 __copy_to_user (void __user *to, const void *from, unsigned long count)
5181 {
5182+ if (count > INT_MAX)
5183+ return count;
5184+
5185+ if (!__builtin_constant_p(count))
5186+ check_object_size(from, count, true);
5187+
5188 return __copy_user(to, (__force void __user *) from, count);
5189 }
5190
5191 static inline unsigned long
5192 __copy_from_user (void *to, const void __user *from, unsigned long count)
5193 {
5194+ if (count > INT_MAX)
5195+ return count;
5196+
5197+ if (!__builtin_constant_p(count))
5198+ check_object_size(to, count, false);
5199+
5200 return __copy_user((__force void __user *) to, from, count);
5201 }
5202
5203@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5204 ({ \
5205 void __user *__cu_to = (to); \
5206 const void *__cu_from = (from); \
5207- long __cu_len = (n); \
5208+ unsigned long __cu_len = (n); \
5209 \
5210- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5211+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5212+ if (!__builtin_constant_p(n)) \
5213+ check_object_size(__cu_from, __cu_len, true); \
5214 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5215+ } \
5216 __cu_len; \
5217 })
5218
5219@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5220 ({ \
5221 void *__cu_to = (to); \
5222 const void __user *__cu_from = (from); \
5223- long __cu_len = (n); \
5224+ unsigned long __cu_len = (n); \
5225 \
5226 __chk_user_ptr(__cu_from); \
5227- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5228+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5229+ if (!__builtin_constant_p(n)) \
5230+ check_object_size(__cu_to, __cu_len, false); \
5231 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5232+ } \
5233 __cu_len; \
5234 })
5235
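
Two independent hardenings are layered onto the ia64 copy routines above: the count > INT_MAX test fails fast on lengths that underflowed (a size_t computed as small minus large wraps to a huge value), and check_object_size() is the PAX_USERCOPY hook that verifies a variable-length copy stays within a single heap object or the current stack frame. The bug class the first check neutralizes, shown as a hypothetical caller (copy_payload, dst, hdr are illustrative names):

        unsigned long copy_payload(void *dst, const void __user *src,
                                   unsigned long len, unsigned long hdr)
        {
                unsigned long n = len - hdr;            /* hdr > len wraps n to ~ULONG_MAX */
                return __copy_from_user(dst, src, n);   /* now returns n untouched instead
                                                           of overrunning dst */
        }
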
5236diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5237index 24603be..948052d 100644
5238--- a/arch/ia64/kernel/module.c
5239+++ b/arch/ia64/kernel/module.c
5240@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5241 void
5242 module_free (struct module *mod, void *module_region)
5243 {
5244- if (mod && mod->arch.init_unw_table &&
5245- module_region == mod->module_init) {
5246+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5247 unw_remove_unwind_table(mod->arch.init_unw_table);
5248 mod->arch.init_unw_table = NULL;
5249 }
5250@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5251 }
5252
5253 static inline int
5254+in_init_rx (const struct module *mod, uint64_t addr)
5255+{
5256+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5257+}
5258+
5259+static inline int
5260+in_init_rw (const struct module *mod, uint64_t addr)
5261+{
5262+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5263+}
5264+
5265+static inline int
5266 in_init (const struct module *mod, uint64_t addr)
5267 {
5268- return addr - (uint64_t) mod->module_init < mod->init_size;
5269+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5270+}
5271+
5272+static inline int
5273+in_core_rx (const struct module *mod, uint64_t addr)
5274+{
5275+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5276+}
5277+
5278+static inline int
5279+in_core_rw (const struct module *mod, uint64_t addr)
5280+{
5281+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5282 }
5283
5284 static inline int
5285 in_core (const struct module *mod, uint64_t addr)
5286 {
5287- return addr - (uint64_t) mod->module_core < mod->core_size;
5288+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5289 }
5290
5291 static inline int
5292@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5293 break;
5294
5295 case RV_BDREL:
5296- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5297+ if (in_init_rx(mod, val))
5298+ val -= (uint64_t) mod->module_init_rx;
5299+ else if (in_init_rw(mod, val))
5300+ val -= (uint64_t) mod->module_init_rw;
5301+ else if (in_core_rx(mod, val))
5302+ val -= (uint64_t) mod->module_core_rx;
5303+ else if (in_core_rw(mod, val))
5304+ val -= (uint64_t) mod->module_core_rw;
5305 break;
5306
5307 case RV_LTV:
5308@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5309 * addresses have been selected...
5310 */
5311 uint64_t gp;
5312- if (mod->core_size > MAX_LTOFF)
5313+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5314 /*
5315 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5316 * at the end of the module.
5317 */
5318- gp = mod->core_size - MAX_LTOFF / 2;
5319+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5320 else
5321- gp = mod->core_size / 2;
5322- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5323+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5324+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5325 mod->arch.gp = gp;
5326 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5327 }
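
The module.c rework follows from the patch splitting struct module's core/init regions into _rx (code, mapped read-execute) and _rw (data) halves so KERNEXEC can keep module code non-writable; address classification and gp placement must then account for two regions apiece. The in_init_rx()-style helpers also use a worthwhile idiom, a half-open range test in a single unsigned comparison:

        /* addr in [base, base + size)? if addr < base the subtraction wraps huge
           and the '<' fails, so one branch covers both bounds */
        static inline int in_range(uint64_t addr, uint64_t base, uint64_t size)
        {
                return addr - base < size;
        }
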
5328diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5329index ab33328..f39506c 100644
5330--- a/arch/ia64/kernel/palinfo.c
5331+++ b/arch/ia64/kernel/palinfo.c
5332@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5333 return NOTIFY_OK;
5334 }
5335
5336-static struct notifier_block __refdata palinfo_cpu_notifier =
5337+static struct notifier_block palinfo_cpu_notifier =
5338 {
5339 .notifier_call = palinfo_cpu_callback,
5340 .priority = 0,
5341diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5342index 41e33f8..65180b2a 100644
5343--- a/arch/ia64/kernel/sys_ia64.c
5344+++ b/arch/ia64/kernel/sys_ia64.c
5345@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5346 unsigned long align_mask = 0;
5347 struct mm_struct *mm = current->mm;
5348 struct vm_unmapped_area_info info;
5349+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5350
5351 if (len > RGN_MAP_LIMIT)
5352 return -ENOMEM;
5353@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5354 if (REGION_NUMBER(addr) == RGN_HPAGE)
5355 addr = 0;
5356 #endif
5357+
5358+#ifdef CONFIG_PAX_RANDMMAP
5359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5360+ addr = mm->free_area_cache;
5361+ else
5362+#endif
5363+
5364 if (!addr)
5365 addr = TASK_UNMAPPED_BASE;
5366
5367@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5368 info.high_limit = TASK_SIZE;
5369 info.align_mask = align_mask;
5370 info.align_offset = 0;
5371+ info.threadstack_offset = offset;
5372 return vm_unmapped_area(&info);
5373 }
5374
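
threadstack_offset feeds GRKERNSEC_RAND_THREADSTACK: vm_unmapped_area() keeps a randomized gap between a new mapping and any adjacent thread stack, so an attacker cannot mmap flush against one. gr_rand_threadstack_offset() itself lives in the grsecurity/ portion of the patch; conceptually it behaves like the sketch below (illustrative only, not the real helper's code):

        static unsigned long rand_threadstack_offset(const struct file *filp,
                                                     unsigned long flags)
        {
                if (filp || (flags & MAP_FIXED))
                        return 0;       /* only floating anonymous mappings get a gap */
                return (pax_get_random_long() & 0xFFUL) << PAGE_SHIFT;  /* up to 255 pages */
        }
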
5375diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5376index 84f8a52..7c76178 100644
5377--- a/arch/ia64/kernel/vmlinux.lds.S
5378+++ b/arch/ia64/kernel/vmlinux.lds.S
5379@@ -192,7 +192,7 @@ SECTIONS {
5380 /* Per-cpu data: */
5381 . = ALIGN(PERCPU_PAGE_SIZE);
5382 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5383- __phys_per_cpu_start = __per_cpu_load;
5384+ __phys_per_cpu_start = per_cpu_load;
5385 /*
5386 * ensure percpu data fits
5387 * into percpu page size
5388diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5389index 7225dad..2a7c8256 100644
5390--- a/arch/ia64/mm/fault.c
5391+++ b/arch/ia64/mm/fault.c
5392@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5393 return pte_present(pte);
5394 }
5395
5396+#ifdef CONFIG_PAX_PAGEEXEC
5397+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5398+{
5399+ unsigned long i;
5400+
5401+ printk(KERN_ERR "PAX: bytes at PC: ");
5402+ for (i = 0; i < 8; i++) {
5403+ unsigned int c;
5404+ if (get_user(c, (unsigned int *)pc+i))
5405+ printk(KERN_CONT "???????? ");
5406+ else
5407+ printk(KERN_CONT "%08x ", c);
5408+ }
5409+ printk("\n");
5410+}
5411+#endif
5412+
5413 # define VM_READ_BIT 0
5414 # define VM_WRITE_BIT 1
5415 # define VM_EXEC_BIT 2
5416@@ -151,8 +168,21 @@ retry:
5417 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5418 goto bad_area;
5419
5420- if ((vma->vm_flags & mask) != mask)
5421+ if ((vma->vm_flags & mask) != mask) {
5422+
5423+#ifdef CONFIG_PAX_PAGEEXEC
5424+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5425+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5426+ goto bad_area;
5427+
5428+ up_read(&mm->mmap_sem);
5429+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5430+ do_group_exit(SIGKILL);
5431+ }
5432+#endif
5433+
5434 goto bad_area;
5435+ }
5436
5437 /*
5438 * If for any reason at all we couldn't handle the fault, make
5439diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5440index 68232db..6ca80af 100644
5441--- a/arch/ia64/mm/hugetlbpage.c
5442+++ b/arch/ia64/mm/hugetlbpage.c
5443@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5444 unsigned long pgoff, unsigned long flags)
5445 {
5446 struct vm_unmapped_area_info info;
5447+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5448
5449 if (len > RGN_MAP_LIMIT)
5450 return -ENOMEM;
5451@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5452 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5453 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5454 info.align_offset = 0;
5455+ info.threadstack_offset = offset;
5456 return vm_unmapped_area(&info);
5457 }
5458
5459diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5460index 25c3502..560dae7 100644
5461--- a/arch/ia64/mm/init.c
5462+++ b/arch/ia64/mm/init.c
5463@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5464 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5465 vma->vm_end = vma->vm_start + PAGE_SIZE;
5466 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5467+
5468+#ifdef CONFIG_PAX_PAGEEXEC
5469+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5470+ vma->vm_flags &= ~VM_EXEC;
5471+
5472+#ifdef CONFIG_PAX_MPROTECT
5473+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5474+ vma->vm_flags &= ~VM_MAYEXEC;
5475+#endif
5476+
5477+ }
5478+#endif
5479+
5480 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5481 down_write(&current->mm->mmap_sem);
5482 if (insert_vm_struct(current->mm, vma)) {
5483diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5484index 40b3ee9..8c2c112 100644
5485--- a/arch/m32r/include/asm/cache.h
5486+++ b/arch/m32r/include/asm/cache.h
5487@@ -1,8 +1,10 @@
5488 #ifndef _ASM_M32R_CACHE_H
5489 #define _ASM_M32R_CACHE_H
5490
5491+#include <linux/const.h>
5492+
5493 /* L1 cache line size */
5494 #define L1_CACHE_SHIFT 4
5495-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5496+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5497
5498 #endif /* _ASM_M32R_CACHE_H */
5499diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5500index 82abd15..d95ae5d 100644
5501--- a/arch/m32r/lib/usercopy.c
5502+++ b/arch/m32r/lib/usercopy.c
5503@@ -14,6 +14,9 @@
5504 unsigned long
5505 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5506 {
5507+ if ((long)n < 0)
5508+ return n;
5509+
5510 prefetch(from);
5511 if (access_ok(VERIFY_WRITE, to, n))
5512 __copy_user(to,from,n);
5513@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5514 unsigned long
5515 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5516 {
5517+ if ((long)n < 0)
5518+ return n;
5519+
5520 prefetchw(to);
5521 if (access_ok(VERIFY_READ, from, n))
5522 __copy_user_zeroing(to,from,n);
5523diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5524index 0395c51..5f26031 100644
5525--- a/arch/m68k/include/asm/cache.h
5526+++ b/arch/m68k/include/asm/cache.h
5527@@ -4,9 +4,11 @@
5528 #ifndef __ARCH_M68K_CACHE_H
5529 #define __ARCH_M68K_CACHE_H
5530
5531+#include <linux/const.h>
5532+
5533 /* bytes per L1 cache line */
5534 #define L1_CACHE_SHIFT 4
5535-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5536+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5537
5538 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5539
5540diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5541index 2d6f0de..de5f5ac 100644
5542--- a/arch/metag/include/asm/barrier.h
5543+++ b/arch/metag/include/asm/barrier.h
5544@@ -89,7 +89,7 @@ static inline void fence(void)
5545 do { \
5546 compiletime_assert_atomic_type(*p); \
5547 smp_mb(); \
5548- ACCESS_ONCE(*p) = (v); \
5549+ ACCESS_ONCE_RW(*p) = (v); \
5550 } while (0)
5551
5552 #define smp_load_acquire(p) \
5553diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5554index 0424315..defcca9 100644
5555--- a/arch/metag/mm/hugetlbpage.c
5556+++ b/arch/metag/mm/hugetlbpage.c
5557@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5558 info.high_limit = TASK_SIZE;
5559 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5560 info.align_offset = 0;
5561+ info.threadstack_offset = 0;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5566index 4efe96a..60e8699 100644
5567--- a/arch/microblaze/include/asm/cache.h
5568+++ b/arch/microblaze/include/asm/cache.h
5569@@ -13,11 +13,12 @@
5570 #ifndef _ASM_MICROBLAZE_CACHE_H
5571 #define _ASM_MICROBLAZE_CACHE_H
5572
5573+#include <linux/const.h>
5574 #include <asm/registers.h>
5575
5576 #define L1_CACHE_SHIFT 5
5577 /* word-granular cache in microblaze */
5578-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5579+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5580
5581 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5582
5583diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5584index 95fa1f1..56a6fa2 100644
5585--- a/arch/mips/Kconfig
5586+++ b/arch/mips/Kconfig
5587@@ -2298,6 +2298,7 @@ source "kernel/Kconfig.preempt"
5588
5589 config KEXEC
5590 bool "Kexec system call"
5591+ depends on !GRKERNSEC_KMEM
5592 help
5593 kexec is a system call that implements the ability to shutdown your
5594 current kernel, and to start another kernel. It is like a reboot
5595diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5596index 02f2444..506969c 100644
5597--- a/arch/mips/cavium-octeon/dma-octeon.c
5598+++ b/arch/mips/cavium-octeon/dma-octeon.c
5599@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5600 if (dma_release_from_coherent(dev, order, vaddr))
5601 return;
5602
5603- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5604+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5605 }
5606
5607 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5608diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5609index 7eed2f2..c4e385d 100644
5610--- a/arch/mips/include/asm/atomic.h
5611+++ b/arch/mips/include/asm/atomic.h
5612@@ -21,15 +21,39 @@
5613 #include <asm/cmpxchg.h>
5614 #include <asm/war.h>
5615
5616+#ifdef CONFIG_GENERIC_ATOMIC64
5617+#include <asm-generic/atomic64.h>
5618+#endif
5619+
5620 #define ATOMIC_INIT(i) { (i) }
5621
5622+#ifdef CONFIG_64BIT
5623+#define _ASM_EXTABLE(from, to) \
5624+" .section __ex_table,\"a\"\n" \
5625+" .dword " #from ", " #to"\n" \
5626+" .previous\n"
5627+#else
5628+#define _ASM_EXTABLE(from, to) \
5629+" .section __ex_table,\"a\"\n" \
5630+" .word " #from ", " #to"\n" \
5631+" .previous\n"
5632+#endif
5633+
5634 /*
5635 * atomic_read - read atomic variable
5636 * @v: pointer of type atomic_t
5637 *
5638 * Atomically reads the value of @v.
5639 */
5640-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5641+static inline int atomic_read(const atomic_t *v)
5642+{
5643+ return (*(volatile const int *) &v->counter);
5644+}
5645+
5646+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5647+{
5648+ return (*(volatile const int *) &v->counter);
5649+}
5650
5651 /*
5652 * atomic_set - set atomic variable
5653@@ -38,7 +62,15 @@
5654 *
5655 * Atomically sets the value of @v to @i.
5656 */
5657-#define atomic_set(v, i) ((v)->counter = (i))
5658+static inline void atomic_set(atomic_t *v, int i)
5659+{
5660+ v->counter = i;
5661+}
5662+
5663+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5664+{
5665+ v->counter = i;
5666+}
5667
5668 /*
5669 * atomic_add - add integer to atomic variable
5670@@ -47,7 +79,67 @@
5671 *
5672 * Atomically adds @i to @v.
5673 */
5674-static __inline__ void atomic_add(int i, atomic_t * v)
5675+static __inline__ void atomic_add(int i, atomic_t *v)
5676+{
5677+ int temp;
5678+
5679+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5680+ __asm__ __volatile__(
5681+ " .set mips3 \n"
5682+ "1: ll %0, %1 # atomic_add \n"
5683+#ifdef CONFIG_PAX_REFCOUNT
5684+ /* Exception on overflow. */
5685+ "2: add %0, %2 \n"
5686+#else
5687+ " addu %0, %2 \n"
5688+#endif
5689+ " sc %0, %1 \n"
5690+ " beqzl %0, 1b \n"
5691+#ifdef CONFIG_PAX_REFCOUNT
5692+ "3: \n"
5693+ _ASM_EXTABLE(2b, 3b)
5694+#endif
5695+ " .set mips0 \n"
5696+ : "=&r" (temp), "+m" (v->counter)
5697+ : "Ir" (i));
5698+ } else if (kernel_uses_llsc) {
5699+ __asm__ __volatile__(
5700+ " .set mips3 \n"
5701+ "1: ll %0, %1 # atomic_add \n"
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ /* Exception on overflow. */
5704+ "2: add %0, %2 \n"
5705+#else
5706+ " addu %0, %2 \n"
5707+#endif
5708+ " sc %0, %1 \n"
5709+ " beqz %0, 1b \n"
5710+#ifdef CONFIG_PAX_REFCOUNT
5711+ "3: \n"
5712+ _ASM_EXTABLE(2b, 3b)
5713+#endif
5714+ " .set mips0 \n"
5715+ : "=&r" (temp), "+m" (v->counter)
5716+ : "Ir" (i));
5717+ } else {
5718+ unsigned long flags;
5719+
5720+ raw_local_irq_save(flags);
5721+ __asm__ __volatile__(
5722+#ifdef CONFIG_PAX_REFCOUNT
5723+ /* Exception on overflow. */
5724+ "1: add %0, %1 \n"
5725+ "2: \n"
5726+ _ASM_EXTABLE(1b, 2b)
5727+#else
5728+ " addu %0, %1 \n"
5729+#endif
5730+ : "+r" (v->counter) : "Ir" (i));
5731+ raw_local_irq_restore(flags);
5732+ }
5733+}
5734+
5735+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5736 {
5737 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5738 int temp;
5739@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5740 *
5741 * Atomically subtracts @i from @v.
5742 */
5743-static __inline__ void atomic_sub(int i, atomic_t * v)
5744+static __inline__ void atomic_sub(int i, atomic_t *v)
5745+{
5746+ int temp;
5747+
5748+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5749+ __asm__ __volatile__(
5750+ " .set mips3 \n"
5751+ "1: ll %0, %1 # atomic64_sub \n"
5752+#ifdef CONFIG_PAX_REFCOUNT
5753+ /* Exception on overflow. */
5754+ "2: sub %0, %2 \n"
5755+#else
5756+ " subu %0, %2 \n"
5757+#endif
5758+ " sc %0, %1 \n"
5759+ " beqzl %0, 1b \n"
5760+#ifdef CONFIG_PAX_REFCOUNT
5761+ "3: \n"
5762+ _ASM_EXTABLE(2b, 3b)
5763+#endif
5764+ " .set mips0 \n"
5765+ : "=&r" (temp), "+m" (v->counter)
5766+ : "Ir" (i));
5767+ } else if (kernel_uses_llsc) {
5768+ __asm__ __volatile__(
5769+ " .set mips3 \n"
5770+ "1: ll %0, %1 # atomic64_sub \n"
5771+#ifdef CONFIG_PAX_REFCOUNT
5772+ /* Exception on overflow. */
5773+ "2: sub %0, %2 \n"
5774+#else
5775+ " subu %0, %2 \n"
5776+#endif
5777+ " sc %0, %1 \n"
5778+ " beqz %0, 1b \n"
5779+#ifdef CONFIG_PAX_REFCOUNT
5780+ "3: \n"
5781+ _ASM_EXTABLE(2b, 3b)
5782+#endif
5783+ " .set mips0 \n"
5784+ : "=&r" (temp), "+m" (v->counter)
5785+ : "Ir" (i));
5786+ } else {
5787+ unsigned long flags;
5788+
5789+ raw_local_irq_save(flags);
5790+ __asm__ __volatile__(
5791+#ifdef CONFIG_PAX_REFCOUNT
5792+ /* Exception on overflow. */
5793+ "1: sub %0, %1 \n"
5794+ "2: \n"
5795+ _ASM_EXTABLE(1b, 2b)
5796+#else
5797+ " subu %0, %1 \n"
5798+#endif
5799+ : "+r" (v->counter) : "Ir" (i));
5800+ raw_local_irq_restore(flags);
5801+ }
5802+}
5803+
5804+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
5805 {
5806 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5807 int temp;
5808@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5809 /*
5810 * Same as above, but return the result value
5811 */
5812-static __inline__ int atomic_add_return(int i, atomic_t * v)
5813+static __inline__ int atomic_add_return(int i, atomic_t *v)
5814+{
5815+ int result;
5816+ int temp;
5817+
5818+ smp_mb__before_llsc();
5819+
5820+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5821+ __asm__ __volatile__(
5822+ " .set mips3 \n"
5823+ "1: ll %1, %2 # atomic_add_return \n"
5824+#ifdef CONFIG_PAX_REFCOUNT
5825+ "2: add %0, %1, %3 \n"
5826+#else
5827+ " addu %0, %1, %3 \n"
5828+#endif
5829+ " sc %0, %2 \n"
5830+ " beqzl %0, 1b \n"
5831+#ifdef CONFIG_PAX_REFCOUNT
5832+ " b 4f \n"
5833+ " .set noreorder \n"
5834+ "3: b 5f \n"
5835+ " move %0, %1 \n"
5836+ " .set reorder \n"
5837+ _ASM_EXTABLE(2b, 3b)
5838+#endif
5839+ "4: addu %0, %1, %3 \n"
5840+#ifdef CONFIG_PAX_REFCOUNT
5841+ "5: \n"
5842+#endif
5843+ " .set mips0 \n"
5844+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5845+ : "Ir" (i));
5846+ } else if (kernel_uses_llsc) {
5847+ __asm__ __volatile__(
5848+ " .set mips3 \n"
5849+ "1: ll %1, %2 # atomic_add_return \n"
5850+#ifdef CONFIG_PAX_REFCOUNT
5851+ "2: add %0, %1, %3 \n"
5852+#else
5853+ " addu %0, %1, %3 \n"
5854+#endif
5855+ " sc %0, %2 \n"
5856+ " bnez %0, 4f \n"
5857+ " b 1b \n"
5858+#ifdef CONFIG_PAX_REFCOUNT
5859+ " .set noreorder \n"
5860+ "3: b 5f \n"
5861+ " move %0, %1 \n"
5862+ " .set reorder \n"
5863+ _ASM_EXTABLE(2b, 3b)
5864+#endif
5865+ "4: addu %0, %1, %3 \n"
5866+#ifdef CONFIG_PAX_REFCOUNT
5867+ "5: \n"
5868+#endif
5869+ " .set mips0 \n"
5870+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5871+ : "Ir" (i));
5872+ } else {
5873+ unsigned long flags;
5874+
5875+ raw_local_irq_save(flags);
5876+ __asm__ __volatile__(
5877+ " lw %0, %1 \n"
5878+#ifdef CONFIG_PAX_REFCOUNT
5879+ /* Exception on overflow. */
5880+ "1: add %0, %2 \n"
5881+#else
5882+ " addu %0, %2 \n"
5883+#endif
5884+ " sw %0, %1 \n"
5885+#ifdef CONFIG_PAX_REFCOUNT
5886+ /* Note: Dest reg is not modified on overflow */
5887+ "2: \n"
5888+ _ASM_EXTABLE(1b, 2b)
5889+#endif
5890+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5891+ raw_local_irq_restore(flags);
5892+ }
5893+
5894+ smp_llsc_mb();
5895+
5896+ return result;
5897+}
5898+
5899+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5900 {
5901 int result;
5902
5903@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5904 return result;
5905 }
5906
5907-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5908+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5909+{
5910+ int result;
5911+ int temp;
5912+
5913+ smp_mb__before_llsc();
5914+
5915+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5916+ __asm__ __volatile__(
5917+ " .set mips3 \n"
5918+ "1: ll %1, %2 # atomic_sub_return \n"
5919+#ifdef CONFIG_PAX_REFCOUNT
5920+ "2: sub %0, %1, %3 \n"
5921+#else
5922+ " subu %0, %1, %3 \n"
5923+#endif
5924+ " sc %0, %2 \n"
5925+ " beqzl %0, 1b \n"
5926+#ifdef CONFIG_PAX_REFCOUNT
5927+ " b 4f \n"
5928+ " .set noreorder \n"
5929+ "3: b 5f \n"
5930+ " move %0, %1 \n"
5931+ " .set reorder \n"
5932+ _ASM_EXTABLE(2b, 3b)
5933+#endif
5934+ "4: subu %0, %1, %3 \n"
5935+#ifdef CONFIG_PAX_REFCOUNT
5936+ "5: \n"
5937+#endif
5938+ " .set mips0 \n"
5939+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5940+ : "Ir" (i), "m" (v->counter)
5941+ : "memory");
5942+ } else if (kernel_uses_llsc) {
5943+ __asm__ __volatile__(
5944+ " .set mips3 \n"
5945+ "1: ll %1, %2 # atomic_sub_return \n"
5946+#ifdef CONFIG_PAX_REFCOUNT
5947+ "2: sub %0, %1, %3 \n"
5948+#else
5949+ " subu %0, %1, %3 \n"
5950+#endif
5951+ " sc %0, %2 \n"
5952+ " bnez %0, 4f \n"
5953+ " b 1b \n"
5954+#ifdef CONFIG_PAX_REFCOUNT
5955+ " .set noreorder \n"
5956+ "3: b 5f \n"
5957+ " move %0, %1 \n"
5958+ " .set reorder \n"
5959+ _ASM_EXTABLE(2b, 3b)
5960+#endif
5961+ "4: subu %0, %1, %3 \n"
5962+#ifdef CONFIG_PAX_REFCOUNT
5963+ "5: \n"
5964+#endif
5965+ " .set mips0 \n"
5966+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5967+ : "Ir" (i));
5968+ } else {
5969+ unsigned long flags;
5970+
5971+ raw_local_irq_save(flags);
5972+ __asm__ __volatile__(
5973+ " lw %0, %1 \n"
5974+#ifdef CONFIG_PAX_REFCOUNT
5975+ /* Exception on overflow. */
5976+ "1: sub %0, %2 \n"
5977+#else
5978+ " subu %0, %2 \n"
5979+#endif
5980+ " sw %0, %1 \n"
5981+#ifdef CONFIG_PAX_REFCOUNT
5982+ /* Note: Dest reg is not modified on overflow */
5983+ "2: \n"
5984+ _ASM_EXTABLE(1b, 2b)
5985+#endif
5986+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5987+ raw_local_irq_restore(flags);
5988+ }
5989+
5990+ smp_llsc_mb();
5991+
5992+ return result;
5993+}
5994+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5995 {
5996 int result;
5997
5998@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5999 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6000 * The function returns the old value of @v minus @i.
6001 */
6002-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6003+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6004 {
6005 int result;
6006
6007@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6008 return result;
6009 }
6010
6011-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6012-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6013+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6014+{
6015+ return cmpxchg(&v->counter, old, new);
6016+}
6017+
6018+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6019+ int new)
6020+{
6021+ return cmpxchg(&(v->counter), old, new);
6022+}
6023+
6024+static inline int atomic_xchg(atomic_t *v, int new)
6025+{
6026+ return xchg(&v->counter, new);
6027+}
6028+
6029+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6030+{
6031+ return xchg(&(v->counter), new);
6032+}
6033
6034 /**
6035 * __atomic_add_unless - add unless the number is a given value
6036@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6037
6038 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6039 #define atomic_inc_return(v) atomic_add_return(1, (v))
6040+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6041+{
6042+ return atomic_add_return_unchecked(1, v);
6043+}
6044
6045 /*
6046 * atomic_sub_and_test - subtract value from variable and test result
6047@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6048 * other cases.
6049 */
6050 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6051+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6052+{
6053+ return atomic_add_return_unchecked(1, v) == 0;
6054+}
6055
6056 /*
6057 * atomic_dec_and_test - decrement by 1 and test
6058@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6059 * Atomically increments @v by 1.
6060 */
6061 #define atomic_inc(v) atomic_add(1, (v))
6062+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6063+{
6064+ atomic_add_unchecked(1, v);
6065+}
6066
6067 /*
6068 * atomic_dec - decrement and test
6069@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6070 * Atomically decrements @v by 1.
6071 */
6072 #define atomic_dec(v) atomic_sub(1, (v))
6073+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6074+{
6075+ atomic_sub_unchecked(1, v);
6076+}
6077
6078 /*
6079 * atomic_add_negative - add and test if negative
6080@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6081 * @v: pointer of type atomic64_t
6082 *
6083 */
6084-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6085+static inline long atomic64_read(const atomic64_t *v)
6086+{
6087+ return (*(volatile const long *) &v->counter);
6088+}
6089+
6090+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6091+{
6092+ return (*(volatile const long *) &v->counter);
6093+}
6094
6095 /*
6096 * atomic64_set - set atomic variable
6097 * @v: pointer of type atomic64_t
6098 * @i: required value
6099 */
6100-#define atomic64_set(v, i) ((v)->counter = (i))
6101+static inline void atomic64_set(atomic64_t *v, long i)
6102+{
6103+ v->counter = i;
6104+}
6105+
6106+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6107+{
6108+ v->counter = i;
6109+}
6110
6111 /*
6112 * atomic64_add - add integer to atomic variable
6113@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6114 *
6115 * Atomically adds @i to @v.
6116 */
6117-static __inline__ void atomic64_add(long i, atomic64_t * v)
6118+static __inline__ void atomic64_add(long i, atomic64_t *v)
6119+{
6120+ long temp;
6121+
6122+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6123+ __asm__ __volatile__(
6124+ " .set mips3 \n"
6125+ "1: lld %0, %1 # atomic64_add \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ /* Exception on overflow. */
6128+ "2: dadd %0, %2 \n"
6129+#else
6130+ " daddu %0, %2 \n"
6131+#endif
6132+ " scd %0, %1 \n"
6133+ " beqzl %0, 1b \n"
6134+#ifdef CONFIG_PAX_REFCOUNT
6135+ "3: \n"
6136+ _ASM_EXTABLE(2b, 3b)
6137+#endif
6138+ " .set mips0 \n"
6139+ : "=&r" (temp), "+m" (v->counter)
6140+ : "Ir" (i));
6141+ } else if (kernel_uses_llsc) {
6142+ __asm__ __volatile__(
6143+ " .set mips3 \n"
6144+ "1: lld %0, %1 # atomic64_add \n"
6145+#ifdef CONFIG_PAX_REFCOUNT
6146+ /* Exception on overflow. */
6147+ "2: dadd %0, %2 \n"
6148+#else
6149+ " daddu %0, %2 \n"
6150+#endif
6151+ " scd %0, %1 \n"
6152+ " beqz %0, 1b \n"
6153+#ifdef CONFIG_PAX_REFCOUNT
6154+ "3: \n"
6155+ _ASM_EXTABLE(2b, 3b)
6156+#endif
6157+ " .set mips0 \n"
6158+ : "=&r" (temp), "+m" (v->counter)
6159+ : "Ir" (i));
6160+ } else {
6161+ unsigned long flags;
6162+
6163+ raw_local_irq_save(flags);
6164+ __asm__ __volatile__(
6165+#ifdef CONFIG_PAX_REFCOUNT
6166+ /* Exception on overflow. */
6167+ "1: dadd %0, %1 \n"
6168+ "2: \n"
6169+ _ASM_EXTABLE(1b, 2b)
6170+#else
6171+ " daddu %0, %1 \n"
6172+#endif
6173+ : "+r" (v->counter) : "Ir" (i));
6174+ raw_local_irq_restore(flags);
6175+ }
6176+}
6177+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6178 {
6179 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6180 long temp;
6181@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6182 *
6183 * Atomically subtracts @i from @v.
6184 */
6185-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6186+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6187+{
6188+ long temp;
6189+
6190+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6191+ __asm__ __volatile__(
6192+ " .set mips3 \n"
6193+ "1: lld %0, %1 # atomic64_sub \n"
6194+#ifdef CONFIG_PAX_REFCOUNT
6195+ /* Exception on overflow. */
6196+ "2: dsub %0, %2 \n"
6197+#else
6198+ " dsubu %0, %2 \n"
6199+#endif
6200+ " scd %0, %1 \n"
6201+ " beqzl %0, 1b \n"
6202+#ifdef CONFIG_PAX_REFCOUNT
6203+ "3: \n"
6204+ _ASM_EXTABLE(2b, 3b)
6205+#endif
6206+ " .set mips0 \n"
6207+ : "=&r" (temp), "+m" (v->counter)
6208+ : "Ir" (i));
6209+ } else if (kernel_uses_llsc) {
6210+ __asm__ __volatile__(
6211+ " .set mips3 \n"
6212+ "1: lld %0, %1 # atomic64_sub \n"
6213+#ifdef CONFIG_PAX_REFCOUNT
6214+ /* Exception on overflow. */
6215+ "2: dsub %0, %2 \n"
6216+#else
6217+ " dsubu %0, %2 \n"
6218+#endif
6219+ " scd %0, %1 \n"
6220+ " beqz %0, 1b \n"
6221+#ifdef CONFIG_PAX_REFCOUNT
6222+ "3: \n"
6223+ _ASM_EXTABLE(2b, 3b)
6224+#endif
6225+ " .set mips0 \n"
6226+ : "=&r" (temp), "+m" (v->counter)
6227+ : "Ir" (i));
6228+ } else {
6229+ unsigned long flags;
6230+
6231+ raw_local_irq_save(flags);
6232+ __asm__ __volatile__(
6233+#ifdef CONFIG_PAX_REFCOUNT
6234+ /* Exception on overflow. */
6235+ "1: dsub %0, %1 \n"
6236+ "2: \n"
6237+ _ASM_EXTABLE(1b, 2b)
6238+#else
6239+ " dsubu %0, %1 \n"
6240+#endif
6241+ : "+r" (v->counter) : "Ir" (i));
6242+ raw_local_irq_restore(flags);
6243+ }
6244+}
6245+
6246+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6247 {
6248 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6249 long temp;
6250@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6251 /*
6252 * Same as above, but return the result value
6253 */
6254-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6255+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6256+{
6257+ long result;
6258+ long temp;
6259+
6260+ smp_mb__before_llsc();
6261+
6262+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6263+ __asm__ __volatile__(
6264+ " .set mips3 \n"
6265+ "1: lld %1, %2 # atomic64_add_return \n"
6266+#ifdef CONFIG_PAX_REFCOUNT
6267+ "2: dadd %0, %1, %3 \n"
6268+#else
6269+ " daddu %0, %1, %3 \n"
6270+#endif
6271+ " scd %0, %2 \n"
6272+ " beqzl %0, 1b \n"
6273+#ifdef CONFIG_PAX_REFCOUNT
6274+ " b 4f \n"
6275+ " .set noreorder \n"
6276+ "3: b 5f \n"
6277+ " move %0, %1 \n"
6278+ " .set reorder \n"
6279+ _ASM_EXTABLE(2b, 3b)
6280+#endif
6281+ "4: daddu %0, %1, %3 \n"
6282+#ifdef CONFIG_PAX_REFCOUNT
6283+ "5: \n"
6284+#endif
6285+ " .set mips0 \n"
6286+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6287+ : "Ir" (i));
6288+ } else if (kernel_uses_llsc) {
6289+ __asm__ __volatile__(
6290+ " .set mips3 \n"
6291+ "1: lld %1, %2 # atomic64_add_return \n"
6292+#ifdef CONFIG_PAX_REFCOUNT
6293+ "2: dadd %0, %1, %3 \n"
6294+#else
6295+ " daddu %0, %1, %3 \n"
6296+#endif
6297+ " scd %0, %2 \n"
6298+ " bnez %0, 4f \n"
6299+ " b 1b \n"
6300+#ifdef CONFIG_PAX_REFCOUNT
6301+ " .set noreorder \n"
6302+ "3: b 5f \n"
6303+ " move %0, %1 \n"
6304+ " .set reorder \n"
6305+ _ASM_EXTABLE(2b, 3b)
6306+#endif
6307+ "4: daddu %0, %1, %3 \n"
6308+#ifdef CONFIG_PAX_REFCOUNT
6309+ "5: \n"
6310+#endif
6311+ " .set mips0 \n"
6312+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6313+ : "Ir" (i), "m" (v->counter)
6314+ : "memory");
6315+ } else {
6316+ unsigned long flags;
6317+
6318+ raw_local_irq_save(flags);
6319+ __asm__ __volatile__(
6320+ " ld %0, %1 \n"
6321+#ifdef CONFIG_PAX_REFCOUNT
6322+ /* Exception on overflow. */
6323+ "1: dadd %0, %2 \n"
6324+#else
6325+ " daddu %0, %2 \n"
6326+#endif
6327+ " sd %0, %1 \n"
6328+#ifdef CONFIG_PAX_REFCOUNT
6329+ /* Note: Dest reg is not modified on overflow */
6330+ "2: \n"
6331+ _ASM_EXTABLE(1b, 2b)
6332+#endif
6333+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6334+ raw_local_irq_restore(flags);
6335+ }
6336+
6337+ smp_llsc_mb();
6338+
6339+ return result;
6340+}
6341+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6342 {
6343 long result;
6344
6345@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6346 return result;
6347 }
6348
6349-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6350+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6351+{
6352+ long result;
6353+ long temp;
6354+
6355+ smp_mb__before_llsc();
6356+
6357+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6358+ long temp;
6359+
6360+ __asm__ __volatile__(
6361+ " .set mips3 \n"
6362+ "1: lld %1, %2 # atomic64_sub_return \n"
6363+#ifdef CONFIG_PAX_REFCOUNT
6364+ "2: dsub %0, %1, %3 \n"
6365+#else
6366+ " dsubu %0, %1, %3 \n"
6367+#endif
6368+ " scd %0, %2 \n"
6369+ " beqzl %0, 1b \n"
6370+#ifdef CONFIG_PAX_REFCOUNT
6371+ " b 4f \n"
6372+ " .set noreorder \n"
6373+ "3: b 5f \n"
6374+ " move %0, %1 \n"
6375+ " .set reorder \n"
6376+ _ASM_EXTABLE(2b, 3b)
6377+#endif
6378+ "4: dsubu %0, %1, %3 \n"
6379+#ifdef CONFIG_PAX_REFCOUNT
6380+ "5: \n"
6381+#endif
6382+ " .set mips0 \n"
6383+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6384+ : "Ir" (i), "m" (v->counter)
6385+ : "memory");
6386+ } else if (kernel_uses_llsc) {
6387+ __asm__ __volatile__(
6388+ " .set mips3 \n"
6389+ "1: lld %1, %2 # atomic64_sub_return \n"
6390+#ifdef CONFIG_PAX_REFCOUNT
6391+ "2: dsub %0, %1, %3 \n"
6392+#else
6393+ " dsubu %0, %1, %3 \n"
6394+#endif
6395+ " scd %0, %2 \n"
6396+ " bnez %0, 4f \n"
6397+ " b 1b \n"
6398+#ifdef CONFIG_PAX_REFCOUNT
6399+ " .set noreorder \n"
6400+ "3: b 5f \n"
6401+ " move %0, %1 \n"
6402+ " .set reorder \n"
6403+ _ASM_EXTABLE(2b, 3b)
6404+#endif
6405+ "4: dsubu %0, %1, %3 \n"
6406+#ifdef CONFIG_PAX_REFCOUNT
6407+ "5: \n"
6408+#endif
6409+ " .set mips0 \n"
6410+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6411+ : "Ir" (i), "m" (v->counter)
6412+ : "memory");
6413+ } else {
6414+ unsigned long flags;
6415+
6416+ raw_local_irq_save(flags);
6417+ __asm__ __volatile__(
6418+ " ld %0, %1 \n"
6419+#ifdef CONFIG_PAX_REFCOUNT
6420+ /* Exception on overflow. */
6421+ "1: dsub %0, %2 \n"
6422+#else
6423+ " dsubu %0, %2 \n"
6424+#endif
6425+ " sd %0, %1 \n"
6426+#ifdef CONFIG_PAX_REFCOUNT
6427+ /* Note: Dest reg is not modified on overflow */
6428+ "2: \n"
6429+ _ASM_EXTABLE(1b, 2b)
6430+#endif
6431+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6432+ raw_local_irq_restore(flags);
6433+ }
6434+
6435+ smp_llsc_mb();
6436+
6437+ return result;
6438+}
6439+
6440+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6441 {
6442 long result;
6443
6444@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6445 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6446 * The function returns the old value of @v minus @i.
6447 */
6448-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6449+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6450 {
6451 long result;
6452
6453@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6454 return result;
6455 }
6456
6457-#define atomic64_cmpxchg(v, o, n) \
6458- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6459-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6460+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6461+{
6462+ return cmpxchg(&v->counter, old, new);
6463+}
6464+
6465+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6466+ long new)
6467+{
6468+ return cmpxchg(&(v->counter), old, new);
6469+}
6470+
6471+static inline long atomic64_xchg(atomic64_t *v, long new)
6472+{
6473+ return xchg(&v->counter, new);
6474+}
6475+
6476+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6477+{
6478+ return xchg(&(v->counter), new);
6479+}
6480
6481 /**
6482 * atomic64_add_unless - add unless the number is a given value
6483@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6484
6485 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6486 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6487+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6488
6489 /*
6490 * atomic64_sub_and_test - subtract value from variable and test result
6491@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6492 * other cases.
6493 */
6494 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6495+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6496
6497 /*
6498 * atomic64_dec_and_test - decrement by 1 and test
6499@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6500 * Atomically increments @v by 1.
6501 */
6502 #define atomic64_inc(v) atomic64_add(1, (v))
6503+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6504
6505 /*
6506 * atomic64_dec - decrement and test
6507@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6508 * Atomically decrements @v by 1.
6509 */
6510 #define atomic64_dec(v) atomic64_sub(1, (v))
6511+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6512
6513 /*
6514 * atomic64_add_negative - add and test if negative
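
The long atomic.h rewrite above is the PAX_REFCOUNT core for MIPS: with the option enabled, each operation uses the trapping instruction (add, sub, dadd, dsub raise an integer-overflow exception) in place of the wrapping u-variant (addu, subu, daddu, dsubu), and the local _ASM_EXTABLE entry registers a fixup address so the overflow trap can be handled and the offending task killed before the counter ever wraps. That closes the classic refcount-overflow use-after-free:

        /* the pattern being defended; free_object() is illustrative */
        extern void free_object(void);
        static atomic_t refs = ATOMIC_INIT(1);

        void get_ref(void) { atomic_inc(&refs); }      /* traps at INT_MAX instead of
                                                           wrapping to INT_MIN */
        void put_ref(void)
        {
                if (atomic_dec_and_test(&refs))         /* after a wrap this would reach zero
                                                           early and free a live object */
                        free_object();
        }
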
6515diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6516index e1aa4e4..670b68b 100644
6517--- a/arch/mips/include/asm/barrier.h
6518+++ b/arch/mips/include/asm/barrier.h
6519@@ -184,7 +184,7 @@
6520 do { \
6521 compiletime_assert_atomic_type(*p); \
6522 smp_mb(); \
6523- ACCESS_ONCE(*p) = (v); \
6524+ ACCESS_ONCE_RW(*p) = (v); \
6525 } while (0)
6526
6527 #define smp_load_acquire(p) \
6528diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6529index b4db69f..8f3b093 100644
6530--- a/arch/mips/include/asm/cache.h
6531+++ b/arch/mips/include/asm/cache.h
6532@@ -9,10 +9,11 @@
6533 #ifndef _ASM_CACHE_H
6534 #define _ASM_CACHE_H
6535
6536+#include <linux/const.h>
6537 #include <kmalloc.h>
6538
6539 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6540-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6541+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6542
6543 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6544 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6545diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6546index d414405..6bb4ba2 100644
6547--- a/arch/mips/include/asm/elf.h
6548+++ b/arch/mips/include/asm/elf.h
6549@@ -398,13 +398,16 @@ extern const char *__elf_platform;
6550 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6551 #endif
6552
6553+#ifdef CONFIG_PAX_ASLR
6554+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6555+
6556+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6557+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6558+#endif
6559+
6560 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6561 struct linux_binprm;
6562 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6563 int uses_interp);
6564
6565-struct mm_struct;
6566-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6567-#define arch_randomize_brk arch_randomize_brk
6568-
6569 #endif /* _ASM_ELF_H */
6570diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6571index c1f6afa..38cc6e9 100644
6572--- a/arch/mips/include/asm/exec.h
6573+++ b/arch/mips/include/asm/exec.h
6574@@ -12,6 +12,6 @@
6575 #ifndef _ASM_EXEC_H
6576 #define _ASM_EXEC_H
6577
6578-extern unsigned long arch_align_stack(unsigned long sp);
6579+#define arch_align_stack(x) ((x) & ~0xfUL)
6580
6581 #endif /* _ASM_EXEC_H */
6582diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6583index 9e8ef59..1139d6b 100644
6584--- a/arch/mips/include/asm/hw_irq.h
6585+++ b/arch/mips/include/asm/hw_irq.h
6586@@ -10,7 +10,7 @@
6587
6588 #include <linux/atomic.h>
6589
6590-extern atomic_t irq_err_count;
6591+extern atomic_unchecked_t irq_err_count;
6592
6593 /*
6594 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6595diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6596index d44622c..64990d2 100644
6597--- a/arch/mips/include/asm/local.h
6598+++ b/arch/mips/include/asm/local.h
6599@@ -12,15 +12,25 @@ typedef struct
6600 atomic_long_t a;
6601 } local_t;
6602
6603+typedef struct {
6604+ atomic_long_unchecked_t a;
6605+} local_unchecked_t;
6606+
6607 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6608
6609 #define local_read(l) atomic_long_read(&(l)->a)
6610+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6611 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6612+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6613
6614 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6615+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6616 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6617+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6618 #define local_inc(l) atomic_long_inc(&(l)->a)
6619+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6620 #define local_dec(l) atomic_long_dec(&(l)->a)
6621+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6622
6623 /*
6624 * Same as above, but return the result value
6625@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6626 return result;
6627 }
6628
6629+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6630+{
6631+ unsigned long result;
6632+
6633+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6634+ unsigned long temp;
6635+
6636+ __asm__ __volatile__(
6637+ " .set mips3 \n"
6638+ "1:" __LL "%1, %2 # local_add_return \n"
6639+ " addu %0, %1, %3 \n"
6640+ __SC "%0, %2 \n"
6641+ " beqzl %0, 1b \n"
6642+ " addu %0, %1, %3 \n"
6643+ " .set mips0 \n"
6644+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6645+ : "Ir" (i), "m" (l->a.counter)
6646+ : "memory");
6647+ } else if (kernel_uses_llsc) {
6648+ unsigned long temp;
6649+
6650+ __asm__ __volatile__(
6651+ " .set mips3 \n"
6652+ "1:" __LL "%1, %2 # local_add_return \n"
6653+ " addu %0, %1, %3 \n"
6654+ __SC "%0, %2 \n"
6655+ " beqz %0, 1b \n"
6656+ " addu %0, %1, %3 \n"
6657+ " .set mips0 \n"
6658+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6659+ : "Ir" (i), "m" (l->a.counter)
6660+ : "memory");
6661+ } else {
6662+ unsigned long flags;
6663+
6664+ local_irq_save(flags);
6665+ result = l->a.counter;
6666+ result += i;
6667+ l->a.counter = result;
6668+ local_irq_restore(flags);
6669+ }
6670+
6671+ return result;
6672+}
6673+
6674 static __inline__ long local_sub_return(long i, local_t * l)
6675 {
6676 unsigned long result;
6677@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6678
6679 #define local_cmpxchg(l, o, n) \
6680 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6681+#define local_cmpxchg_unchecked(l, o, n) \
6682+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6683 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6684
6685 /**
6686diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6687index 5e08bcc..cfedefc 100644
6688--- a/arch/mips/include/asm/page.h
6689+++ b/arch/mips/include/asm/page.h
6690@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6691 #ifdef CONFIG_CPU_MIPS32
6692 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6693 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6694- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6695+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6696 #else
6697 typedef struct { unsigned long long pte; } pte_t;
6698 #define pte_val(x) ((x).pte)
6699diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6700index b336037..5b874cc 100644
6701--- a/arch/mips/include/asm/pgalloc.h
6702+++ b/arch/mips/include/asm/pgalloc.h
6703@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6704 {
6705 set_pud(pud, __pud((unsigned long)pmd));
6706 }
6707+
6708+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6709+{
6710+ pud_populate(mm, pud, pmd);
6711+}
6712 #endif
6713
6714 /*
6715diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6716index 008324d..f67c239 100644
6717--- a/arch/mips/include/asm/pgtable.h
6718+++ b/arch/mips/include/asm/pgtable.h
6719@@ -20,6 +20,9 @@
6720 #include <asm/io.h>
6721 #include <asm/pgtable-bits.h>
6722
6723+#define ktla_ktva(addr) (addr)
6724+#define ktva_ktla(addr) (addr)
6725+
6726 struct mm_struct;
6727 struct vm_area_struct;
6728
6729diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6730index 25da651..ae2a259 100644
6731--- a/arch/mips/include/asm/smtc_proc.h
6732+++ b/arch/mips/include/asm/smtc_proc.h
6733@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6734
6735 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6736
6737-extern atomic_t smtc_fpu_recoveries;
6738+extern atomic_unchecked_t smtc_fpu_recoveries;
6739
6740 #endif /* __ASM_SMTC_PROC_H */
6741diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6742index 24846f9..61c49f0 100644
6743--- a/arch/mips/include/asm/thread_info.h
6744+++ b/arch/mips/include/asm/thread_info.h
6745@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
6746 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6747 #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
6748 #define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
6749+/* li takes a 32bit immediate */
6750+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6751 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6752
6753 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6754@@ -134,13 +136,14 @@ static inline struct thread_info *current_thread_info(void)
6755 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6756 #define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
6757 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6758+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6759
6760 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6761- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6762+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6763
6764 /* work to do in syscall_trace_leave() */
6765 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6766- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6767+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6768
6769 /* work to do on interrupt/exception return */
6770 #define _TIF_WORK_MASK \
6771@@ -148,7 +151,7 @@ static inline struct thread_info *current_thread_info(void)
6772 /* work to do on any return to u-space */
6773 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6774 _TIF_WORK_SYSCALL_EXIT | \
6775- _TIF_SYSCALL_TRACEPOINT)
6776+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6777
6778 /*
6779 * We stash processor id into a COP0 register to retrieve it fast
6780diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6781index f3fa375..3af6637 100644
6782--- a/arch/mips/include/asm/uaccess.h
6783+++ b/arch/mips/include/asm/uaccess.h
6784@@ -128,6 +128,7 @@ extern u64 __ua_limit;
6785 __ok == 0; \
6786 })
6787
6788+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6789 #define access_ok(type, addr, size) \
6790 likely(__access_ok((addr), (size), __access_mask))
6791
6792diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6793index 1188e00..41cf144 100644
6794--- a/arch/mips/kernel/binfmt_elfn32.c
6795+++ b/arch/mips/kernel/binfmt_elfn32.c
6796@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6797 #undef ELF_ET_DYN_BASE
6798 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6799
6800+#ifdef CONFIG_PAX_ASLR
6801+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6802+
6803+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6804+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6805+#endif
6806+
6807 #include <asm/processor.h>
6808 #include <linux/module.h>
6809 #include <linux/elfcore.h>
6810diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6811index 7faf5f2..f3d3cf4 100644
6812--- a/arch/mips/kernel/binfmt_elfo32.c
6813+++ b/arch/mips/kernel/binfmt_elfo32.c
6814@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6815 #undef ELF_ET_DYN_BASE
6816 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6817
6818+#ifdef CONFIG_PAX_ASLR
6819+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6820+
6821+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6822+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6823+#endif
6824+
6825 #include <asm/processor.h>
6826
6827 /*
6828diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6829index 2b91fe8..fe4f6b4 100644
6830--- a/arch/mips/kernel/i8259.c
6831+++ b/arch/mips/kernel/i8259.c
6832@@ -205,7 +205,7 @@ spurious_8259A_irq:
6833 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6834 spurious_irq_mask |= irqmask;
6835 }
6836- atomic_inc(&irq_err_count);
6837+ atomic_inc_unchecked(&irq_err_count);
6838 /*
6839 * Theoretically we do not have to handle this IRQ,
6840 * but in Linux this does not cause problems and is
6841diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6842index 44a1f79..2bd6aa3 100644
6843--- a/arch/mips/kernel/irq-gt641xx.c
6844+++ b/arch/mips/kernel/irq-gt641xx.c
6845@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6846 }
6847 }
6848
6849- atomic_inc(&irq_err_count);
6850+ atomic_inc_unchecked(&irq_err_count);
6851 }
6852
6853 void __init gt641xx_irq_init(void)
6854diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6855index d1fea7a..2e591b0 100644
6856--- a/arch/mips/kernel/irq.c
6857+++ b/arch/mips/kernel/irq.c
6858@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6859 printk("unexpected IRQ # %d\n", irq);
6860 }
6861
6862-atomic_t irq_err_count;
6863+atomic_unchecked_t irq_err_count;
6864
6865 int arch_show_interrupts(struct seq_file *p, int prec)
6866 {
6867- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6868+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6869 return 0;
6870 }
6871
6872 asmlinkage void spurious_interrupt(void)
6873 {
6874- atomic_inc(&irq_err_count);
6875+ atomic_inc_unchecked(&irq_err_count);
6876 }
6877
6878 void __init init_IRQ(void)
6879@@ -110,7 +110,10 @@ void __init init_IRQ(void)
6880 #endif
6881 }
6882
6883+
6884 #ifdef DEBUG_STACKOVERFLOW
6885+extern void gr_handle_kernel_exploit(void);
6886+
6887 static inline void check_stack_overflow(void)
6888 {
6889 unsigned long sp;
6890@@ -126,6 +129,7 @@ static inline void check_stack_overflow(void)
6891 printk("do_IRQ: stack overflow: %ld\n",
6892 sp - sizeof(struct thread_info));
6893 dump_stack();
6894+ gr_handle_kernel_exploit();
6895 }
6896 }
6897 #else
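The atomic_t -> atomic_unchecked_t conversions above follow one rule: counters that are pure statistics and may legitimately wrap (irq_err_count and friends) are moved to the "unchecked" type so PAX_REFCOUNT's overflow trap does not fire on them, leaving the checked type for real reference counts. The unchecked kernel type is grsecurity-specific; a userspace stand-in using plain C11 atomics shows the intent:

/* Userspace sketch of the idea behind atomic_unchecked_t: a wrapping
 * statistics counter that overflow detection should ignore. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint irq_err_count;   /* wrapping is harmless here */

static void spurious_interrupt(void)
{
    atomic_fetch_add_explicit(&irq_err_count, 1, memory_order_relaxed);
}

int main(void)
{
    spurious_interrupt();
    printf("ERR: %u\n", atomic_load(&irq_err_count));
    return 0;
}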
6898diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6899index 6ae540e..b7396dc 100644
6900--- a/arch/mips/kernel/process.c
6901+++ b/arch/mips/kernel/process.c
6902@@ -562,15 +562,3 @@ unsigned long get_wchan(struct task_struct *task)
6903 out:
6904 return pc;
6905 }
6906-
6907-/*
6908- * Don't forget that the stack pointer must be aligned on a 8 bytes
6909- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6910- */
6911-unsigned long arch_align_stack(unsigned long sp)
6912-{
6913- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6914- sp -= get_random_int() & ~PAGE_MASK;
6915-
6916- return sp & ALMASK;
6917-}
6918diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6919index 7da9b76..21578be 100644
6920--- a/arch/mips/kernel/ptrace.c
6921+++ b/arch/mips/kernel/ptrace.c
6922@@ -658,6 +658,10 @@ long arch_ptrace(struct task_struct *child, long request,
6923 return ret;
6924 }
6925
6926+#ifdef CONFIG_GRKERNSEC_SETXID
6927+extern void gr_delayed_cred_worker(void);
6928+#endif
6929+
6930 /*
6931 * Notification of system call entry/exit
6932 * - triggered by current->work.syscall_trace
6933@@ -674,6 +678,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6934 tracehook_report_syscall_entry(regs))
6935 ret = -1;
6936
6937+#ifdef CONFIG_GRKERNSEC_SETXID
6938+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6939+ gr_delayed_cred_worker();
6940+#endif
6941+
6942 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6943 trace_sys_enter(regs, regs->regs[2]);
6944
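The syscall_trace_enter hook above is a one-shot hand-off: a setxid in one thread raises TIF_GRSEC_SETXID on its siblings, and each sibling applies the new credentials at its next syscall boundary, where test_and_clear_thread_flag guarantees the work runs exactly once. A minimal sketch of that hand-off, with a stand-in for gr_delayed_cred_worker():

/* Sketch of the one-shot flag consumption at syscall entry; the
 * worker name is a stand-in, not the grsecurity implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool setxid_pending;

static void delayed_cred_worker(void) { puts("credentials updated"); }

static void syscall_trace_enter(void)
{
    /* test-and-clear: only the first entry after the flag was raised
     * performs the update. */
    if (atomic_exchange(&setxid_pending, false))
        delayed_cred_worker();
}

int main(void)
{
    atomic_store(&setxid_pending, true);
    syscall_trace_enter();   /* runs the worker */
    syscall_trace_enter();   /* flag already consumed: no-op */
    return 0;
}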
6945diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6946index 07fc524..b9d7f28 100644
6947--- a/arch/mips/kernel/reset.c
6948+++ b/arch/mips/kernel/reset.c
6949@@ -13,6 +13,7 @@
6950 #include <linux/reboot.h>
6951
6952 #include <asm/reboot.h>
6953+#include <asm/bug.h>
6954
6955 /*
6956 * Urgs ... Too many MIPS machines to handle this in a generic way.
6957@@ -29,16 +30,19 @@ void machine_restart(char *command)
6958 {
6959 if (_machine_restart)
6960 _machine_restart(command);
6961+ BUG();
6962 }
6963
6964 void machine_halt(void)
6965 {
6966 if (_machine_halt)
6967 _machine_halt();
6968+ BUG();
6969 }
6970
6971 void machine_power_off(void)
6972 {
6973 if (pm_power_off)
6974 pm_power_off();
6975+ BUG();
6976 }
6977diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6978index c10aa84..9ec2e60 100644
6979--- a/arch/mips/kernel/smtc-proc.c
6980+++ b/arch/mips/kernel/smtc-proc.c
6981@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6982
6983 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6984
6985-atomic_t smtc_fpu_recoveries;
6986+atomic_unchecked_t smtc_fpu_recoveries;
6987
6988 static int smtc_proc_show(struct seq_file *m, void *v)
6989 {
6990@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6991 for(i = 0; i < NR_CPUS; i++)
6992 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6993 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6994- atomic_read(&smtc_fpu_recoveries));
6995+ atomic_read_unchecked(&smtc_fpu_recoveries));
6996 return 0;
6997 }
6998
6999@@ -73,7 +73,7 @@ void init_smtc_stats(void)
7000 smtc_cpu_stats[i].selfipis = 0;
7001 }
7002
7003- atomic_set(&smtc_fpu_recoveries, 0);
7004+ atomic_set_unchecked(&smtc_fpu_recoveries, 0);
7005
7006 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
7007 }
7008diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
7009index dfc1b91..11a2c07 100644
7010--- a/arch/mips/kernel/smtc.c
7011+++ b/arch/mips/kernel/smtc.c
7012@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
7013 }
7014 smtc_ipi_qdump();
7015 printk("%d Recoveries of \"stolen\" FPU\n",
7016- atomic_read(&smtc_fpu_recoveries));
7017+ atomic_read_unchecked(&smtc_fpu_recoveries));
7018 }
7019
7020
7021diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7022index c24ad5f..9983ab2 100644
7023--- a/arch/mips/kernel/sync-r4k.c
7024+++ b/arch/mips/kernel/sync-r4k.c
7025@@ -20,8 +20,8 @@
7026 #include <asm/mipsregs.h>
7027
7028 static atomic_t count_start_flag = ATOMIC_INIT(0);
7029-static atomic_t count_count_start = ATOMIC_INIT(0);
7030-static atomic_t count_count_stop = ATOMIC_INIT(0);
7031+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7032+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7033 static atomic_t count_reference = ATOMIC_INIT(0);
7034
7035 #define COUNTON 100
7036@@ -68,13 +68,13 @@ void synchronise_count_master(int cpu)
7037
7038 for (i = 0; i < NR_LOOPS; i++) {
7039 /* slaves loop on '!= 2' */
7040- while (atomic_read(&count_count_start) != 1)
7041+ while (atomic_read_unchecked(&count_count_start) != 1)
7042 mb();
7043- atomic_set(&count_count_stop, 0);
7044+ atomic_set_unchecked(&count_count_stop, 0);
7045 smp_wmb();
7046
7047 /* this lets the slaves write their count register */
7048- atomic_inc(&count_count_start);
7049+ atomic_inc_unchecked(&count_count_start);
7050
7051 /*
7052 * Everyone initialises count in the last loop:
7053@@ -85,11 +85,11 @@ void synchronise_count_master(int cpu)
7054 /*
7055 * Wait for all slaves to leave the synchronization point:
7056 */
7057- while (atomic_read(&count_count_stop) != 1)
7058+ while (atomic_read_unchecked(&count_count_stop) != 1)
7059 mb();
7060- atomic_set(&count_count_start, 0);
7061+ atomic_set_unchecked(&count_count_start, 0);
7062 smp_wmb();
7063- atomic_inc(&count_count_stop);
7064+ atomic_inc_unchecked(&count_count_stop);
7065 }
7066 /* Arrange for an interrupt in a short while */
7067 write_c0_compare(read_c0_count() + COUNTON);
7068@@ -130,8 +130,8 @@ void synchronise_count_slave(int cpu)
7069 initcount = atomic_read(&count_reference);
7070
7071 for (i = 0; i < NR_LOOPS; i++) {
7072- atomic_inc(&count_count_start);
7073- while (atomic_read(&count_count_start) != 2)
7074+ atomic_inc_unchecked(&count_count_start);
7075+ while (atomic_read_unchecked(&count_count_start) != 2)
7076 mb();
7077
7078 /*
7079@@ -140,8 +140,8 @@ void synchronise_count_slave(int cpu)
7080 if (i == NR_LOOPS-1)
7081 write_c0_count(initcount);
7082
7083- atomic_inc(&count_count_stop);
7084- while (atomic_read(&count_count_stop) != 2)
7085+ atomic_inc_unchecked(&count_count_stop);
7086+ while (atomic_read_unchecked(&count_count_stop) != 2)
7087 mb();
7088 }
7089 /* Arrange for an interrupt in a short while */
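The count_count_start/count_count_stop pair above forms a two-phase rendezvous per loop iteration: each side bumps a shared counter and spins until the other has arrived, so the cycle-counter writes land in the same iteration on all CPUs (the conversion to _unchecked is again because these are handshake counters, not reference counts). A minimal userspace sketch with one master and one slave, which simplifies away the per-iteration counter resets:

/* Two-phase counter rendezvous sketch; compile with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_start, count_stop;

static void *slave(void *arg)
{
    (void)arg;
    atomic_fetch_add(&count_start, 1);
    while (atomic_load(&count_start) != 2)   /* wait for master */
        ;
    /* ... both sides would write their counter here ... */
    atomic_fetch_add(&count_stop, 1);
    while (atomic_load(&count_stop) != 2)
        ;
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, slave, NULL);

    while (atomic_load(&count_start) != 1)   /* wait for slave */
        ;
    atomic_fetch_add(&count_start, 1);       /* release it */

    while (atomic_load(&count_stop) != 1)
        ;
    atomic_fetch_add(&count_stop, 1);
    pthread_join(t, NULL);
    puts("synchronized");
    return 0;
}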
7090diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7091index e0b4996..6b43ce7 100644
7092--- a/arch/mips/kernel/traps.c
7093+++ b/arch/mips/kernel/traps.c
7094@@ -691,7 +691,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7095 siginfo_t info;
7096
7097 prev_state = exception_enter();
7098- die_if_kernel("Integer overflow", regs);
7099+ if (unlikely(!user_mode(regs))) {
7100+
7101+#ifdef CONFIG_PAX_REFCOUNT
7102+ if (fixup_exception(regs)) {
7103+ pax_report_refcount_overflow(regs);
7104+ exception_exit(prev_state);
7105+ return;
7106+ }
7107+#endif
7108+
7109+ die("Integer overflow", regs);
7110+ }
7111
7112 info.si_code = FPE_INTOVF;
7113 info.si_signo = SIGFPE;
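The do_ov hunk above wires MIPS integer-overflow traps into PAX_REFCOUNT: when a kernel-mode refcount increment compiled with trapping arithmetic overflows, the handler consults the exception table and, if a fixup exists, reports the event and resumes rather than dying. The actual mechanism is trap-plus-fixup; the policy it enforces can be sketched in plain C:

/* Sketch of the policy only: a reference count that would overflow is
 * detected, reported, and pinned instead of wrapping negative (the
 * wrap is what makes refcount bugs exploitable as use-after-free). */
#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *count)
{
    if (*count == INT_MAX) {                 /* would overflow */
        fprintf(stderr, "refcount overflow detected, saturating\n");
        return -1;                           /* caller may leak, not UAF */
    }
    (*count)++;
    return 0;
}

int main(void)
{
    int refs = INT_MAX;
    refcount_inc_checked(&refs);
    printf("refs = %d\n", refs);             /* still INT_MAX */
    return 0;
}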
7114diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7115index becc42b..9e43d4b 100644
7116--- a/arch/mips/mm/fault.c
7117+++ b/arch/mips/mm/fault.c
7118@@ -28,6 +28,23 @@
7119 #include <asm/highmem.h> /* For VMALLOC_END */
7120 #include <linux/kdebug.h>
7121
7122+#ifdef CONFIG_PAX_PAGEEXEC
7123+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7124+{
7125+ unsigned long i;
7126+
7127+ printk(KERN_ERR "PAX: bytes at PC: ");
7128+ for (i = 0; i < 5; i++) {
7129+ unsigned int c;
7130+ if (get_user(c, (unsigned int *)pc+i))
7131+ printk(KERN_CONT "???????? ");
7132+ else
7133+ printk(KERN_CONT "%08x ", c);
7134+ }
7135+ printk("\n");
7136+}
7137+#endif
7138+
7139 /*
7140 * This routine handles page faults. It determines the address,
7141 * and the problem, and then passes it off to one of the appropriate
7142@@ -199,6 +216,14 @@ bad_area:
7143 bad_area_nosemaphore:
7144 /* User mode accesses just cause a SIGSEGV */
7145 if (user_mode(regs)) {
7146+
7147+#ifdef CONFIG_PAX_PAGEEXEC
7148+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7149+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7150+ do_group_exit(SIGKILL);
7151+ }
7152+#endif
7153+
7154 tsk->thread.cp0_badvaddr = address;
7155 tsk->thread.error_code = write;
7156 #if 0
7157diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7158index f1baadd..5472dca 100644
7159--- a/arch/mips/mm/mmap.c
7160+++ b/arch/mips/mm/mmap.c
7161@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7162 struct vm_area_struct *vma;
7163 unsigned long addr = addr0;
7164 int do_color_align;
7165+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7166 struct vm_unmapped_area_info info;
7167
7168 if (unlikely(len > TASK_SIZE))
7169@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7170 do_color_align = 1;
7171
7172 /* requesting a specific address */
7173+
7174+#ifdef CONFIG_PAX_RANDMMAP
7175+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7176+#endif
7177+
7178 if (addr) {
7179 if (do_color_align)
7180 addr = COLOUR_ALIGN(addr, pgoff);
7181@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7182 addr = PAGE_ALIGN(addr);
7183
7184 vma = find_vma(mm, addr);
7185- if (TASK_SIZE - len >= addr &&
7186- (!vma || addr + len <= vma->vm_start))
7187+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7188 return addr;
7189 }
7190
7191 info.length = len;
7192 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7193 info.align_offset = pgoff << PAGE_SHIFT;
7194+ info.threadstack_offset = offset;
7195
7196 if (dir == DOWN) {
7197 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7198@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7199 {
7200 unsigned long random_factor = 0UL;
7201
7202+#ifdef CONFIG_PAX_RANDMMAP
7203+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7204+#endif
7205+
7206 if (current->flags & PF_RANDOMIZE) {
7207 random_factor = get_random_int();
7208 random_factor = random_factor << PAGE_SHIFT;
7209@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7210
7211 if (mmap_is_legacy()) {
7212 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7213+
7214+#ifdef CONFIG_PAX_RANDMMAP
7215+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7216+ mm->mmap_base += mm->delta_mmap;
7217+#endif
7218+
7219 mm->get_unmapped_area = arch_get_unmapped_area;
7220 } else {
7221 mm->mmap_base = mmap_base(random_factor);
7222+
7223+#ifdef CONFIG_PAX_RANDMMAP
7224+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7225+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7226+#endif
7227+
7228 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7229 }
7230 }
7231
7232-static inline unsigned long brk_rnd(void)
7233-{
7234- unsigned long rnd = get_random_int();
7235-
7236- rnd = rnd << PAGE_SHIFT;
7237- /* 8MB for 32bit, 256MB for 64bit */
7238- if (TASK_IS_32BIT_ADDR)
7239- rnd = rnd & 0x7ffffful;
7240- else
7241- rnd = rnd & 0xffffffful;
7242-
7243- return rnd;
7244-}
7245-
7246-unsigned long arch_randomize_brk(struct mm_struct *mm)
7247-{
7248- unsigned long base = mm->brk;
7249- unsigned long ret;
7250-
7251- ret = PAGE_ALIGN(base + brk_rnd());
7252-
7253- if (ret < mm->brk)
7254- return mm->brk;
7255-
7256- return ret;
7257-}
7258-
7259 int __virt_addr_valid(const volatile void *kaddr)
7260 {
7261 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
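In the mmap.c hunk above, the open-coded "!vma || addr + len <= vma->vm_start" test is replaced by check_heap_stack_gap(), which additionally rejects candidates that would land too close to the next mapping. The helper's exact semantics are grsecurity-internal; a simplified sketch of a gap-aware acceptance test, under that assumption:

/* Assumed simplification of the gap check: a candidate range must also
 * leave `gap` bytes before the next mapping. */
#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static bool check_gap(const struct vma *next, unsigned long addr,
                      unsigned long len, unsigned long gap)
{
    if (!next)                              /* nothing above us */
        return true;
    return addr + len + gap <= next->vm_start;
}

int main(void)
{
    struct vma stack = { 0x7f0000000000UL, 0x7f0000800000UL };

    printf("fits: %d\n", check_gap(&stack, 0x7effffff0000UL, 0x1000, 0));
    printf("fits with 1MiB gap: %d\n",
           check_gap(&stack, 0x7effffff0000UL, 0x1000, 1UL << 20));
    return 0;
}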
7262diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7263index 59cccd9..f39ac2f 100644
7264--- a/arch/mips/pci/pci-octeon.c
7265+++ b/arch/mips/pci/pci-octeon.c
7266@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7267
7268
7269 static struct pci_ops octeon_pci_ops = {
7270- octeon_read_config,
7271- octeon_write_config,
7272+ .read = octeon_read_config,
7273+ .write = octeon_write_config,
7274 };
7275
7276 static struct resource octeon_pci_mem_resource = {
7277diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7278index 5e36c33..eb4a17b 100644
7279--- a/arch/mips/pci/pcie-octeon.c
7280+++ b/arch/mips/pci/pcie-octeon.c
7281@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7282 }
7283
7284 static struct pci_ops octeon_pcie0_ops = {
7285- octeon_pcie0_read_config,
7286- octeon_pcie0_write_config,
7287+ .read = octeon_pcie0_read_config,
7288+ .write = octeon_pcie0_write_config,
7289 };
7290
7291 static struct resource octeon_pcie0_mem_resource = {
7292@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7293 };
7294
7295 static struct pci_ops octeon_pcie1_ops = {
7296- octeon_pcie1_read_config,
7297- octeon_pcie1_write_config,
7298+ .read = octeon_pcie1_read_config,
7299+ .write = octeon_pcie1_write_config,
7300 };
7301
7302 static struct resource octeon_pcie1_mem_resource = {
7303@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7304 };
7305
7306 static struct pci_ops octeon_dummy_ops = {
7307- octeon_dummy_read_config,
7308- octeon_dummy_write_config,
7309+ .read = octeon_dummy_read_config,
7310+ .write = octeon_dummy_write_config,
7311 };
7312
7313 static struct resource octeon_dummy_mem_resource = {
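The pci_ops hunks above switch from positional to designated initializers. Positional initialization silently binds values to the wrong members if the struct layout ever changes or gains a field ahead of the ones being set; .name = value stays correct regardless of member order. A small illustration with stand-in types (the kernel struct has more fields):

/* Positional vs. designated struct initialization. */
#include <stdio.h>

struct ops {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int my_read(int reg)           { return reg; }
static int my_write(int reg, int val) { (void)reg; return val; }

/* Fragile: depends on member order. */
static struct ops positional = { my_read, my_write };

/* Robust: bound by name. */
static struct ops designated = {
    .read  = my_read,
    .write = my_write,
};

int main(void)
{
    printf("%d %d\n", positional.read(1), designated.write(0, 2));
    return 0;
}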
7314diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7315index a2358b4..7cead4f 100644
7316--- a/arch/mips/sgi-ip27/ip27-nmi.c
7317+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7318@@ -187,9 +187,9 @@ void
7319 cont_nmi_dump(void)
7320 {
7321 #ifndef REAL_NMI_SIGNAL
7322- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7323+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7324
7325- atomic_inc(&nmied_cpus);
7326+ atomic_inc_unchecked(&nmied_cpus);
7327 #endif
7328 /*
7329 * Only allow 1 cpu to proceed
7330@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7331 udelay(10000);
7332 }
7333 #else
7334- while (atomic_read(&nmied_cpus) != num_online_cpus());
7335+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7336 #endif
7337
7338 /*
7339diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7340index a046b30..6799527 100644
7341--- a/arch/mips/sni/rm200.c
7342+++ b/arch/mips/sni/rm200.c
7343@@ -270,7 +270,7 @@ spurious_8259A_irq:
7344 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7345 spurious_irq_mask |= irqmask;
7346 }
7347- atomic_inc(&irq_err_count);
7348+ atomic_inc_unchecked(&irq_err_count);
7349 /*
7350 * Theoretically we do not have to handle this IRQ,
7351 * but in Linux this does not cause problems and is
7352diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7353index 41e873b..34d33a7 100644
7354--- a/arch/mips/vr41xx/common/icu.c
7355+++ b/arch/mips/vr41xx/common/icu.c
7356@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7357
7358 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7359
7360- atomic_inc(&irq_err_count);
7361+ atomic_inc_unchecked(&irq_err_count);
7362
7363 return -1;
7364 }
7365diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7366index ae0e4ee..e8f0692 100644
7367--- a/arch/mips/vr41xx/common/irq.c
7368+++ b/arch/mips/vr41xx/common/irq.c
7369@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7370 irq_cascade_t *cascade;
7371
7372 if (irq >= NR_IRQS) {
7373- atomic_inc(&irq_err_count);
7374+ atomic_inc_unchecked(&irq_err_count);
7375 return;
7376 }
7377
7378@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7379 ret = cascade->get_irq(irq);
7380 irq = ret;
7381 if (ret < 0)
7382- atomic_inc(&irq_err_count);
7383+ atomic_inc_unchecked(&irq_err_count);
7384 else
7385 irq_dispatch(irq);
7386 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7387diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7388index 967d144..db12197 100644
7389--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7390+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7391@@ -11,12 +11,14 @@
7392 #ifndef _ASM_PROC_CACHE_H
7393 #define _ASM_PROC_CACHE_H
7394
7395+#include <linux/const.h>
7396+
7397 /* L1 cache */
7398
7399 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7400 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7401-#define L1_CACHE_BYTES 16 /* bytes per entry */
7402 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7403+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7404 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7405
7406 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7407diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7408index bcb5df2..84fabd2 100644
7409--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7410+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7411@@ -16,13 +16,15 @@
7412 #ifndef _ASM_PROC_CACHE_H
7413 #define _ASM_PROC_CACHE_H
7414
7415+#include <linux/const.h>
7416+
7417 /*
7418 * L1 cache
7419 */
7420 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7421 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7422-#define L1_CACHE_BYTES 32 /* bytes per entry */
7423 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7424+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7425 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7426
7427 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7428diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7429index 4ce7a01..449202a 100644
7430--- a/arch/openrisc/include/asm/cache.h
7431+++ b/arch/openrisc/include/asm/cache.h
7432@@ -19,11 +19,13 @@
7433 #ifndef __ASM_OPENRISC_CACHE_H
7434 #define __ASM_OPENRISC_CACHE_H
7435
7436+#include <linux/const.h>
7437+
7438 /* FIXME: How can we replace these with values from the CPU...
7439 * they shouldn't be hard-coded!
7440 */
7441
7442-#define L1_CACHE_BYTES 16
7443 #define L1_CACHE_SHIFT 4
7444+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7445
7446 #endif /* __ASM_OPENRISC_CACHE_H */
7447diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7448index 472886c..00e7df9 100644
7449--- a/arch/parisc/include/asm/atomic.h
7450+++ b/arch/parisc/include/asm/atomic.h
7451@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7452 return dec;
7453 }
7454
7455+#define atomic64_read_unchecked(v) atomic64_read(v)
7456+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7457+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7458+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7459+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7460+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7461+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7462+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7463+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7464+
7465 #endif /* !CONFIG_64BIT */
7466
7467
7468diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7469index 47f11c7..3420df2 100644
7470--- a/arch/parisc/include/asm/cache.h
7471+++ b/arch/parisc/include/asm/cache.h
7472@@ -5,6 +5,7 @@
7473 #ifndef __ARCH_PARISC_CACHE_H
7474 #define __ARCH_PARISC_CACHE_H
7475
7476+#include <linux/const.h>
7477
7478 /*
7479 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7480@@ -15,13 +16,13 @@
7481 * just ruin performance.
7482 */
7483 #ifdef CONFIG_PA20
7484-#define L1_CACHE_BYTES 64
7485 #define L1_CACHE_SHIFT 6
7486 #else
7487-#define L1_CACHE_BYTES 32
7488 #define L1_CACHE_SHIFT 5
7489 #endif
7490
7491+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7492+
7493 #ifndef __ASSEMBLY__
7494
7495 #define SMP_CACHE_BYTES L1_CACHE_BYTES
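The cache.h hunks above (mn10300, openrisc, parisc) all apply the same change: L1_CACHE_BYTES is derived from L1_CACHE_SHIFT instead of being a second hand-maintained constant, so the pair can never disagree, and _AC(1,UL) gives the value an unsigned long type in C while staying legal in assembly, where a bare UL suffix would not parse. A sketch of the derived-constant pattern:

/* Deriving the byte count from the shift; _AC() is approximated with
 * a plain UL literal since this sketch is C-only. */
#include <stdio.h>

#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)   /* always 2^SHIFT */

#define ALIGN_UP(x) (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
    printf("line = %lu bytes, 100 aligns to %lu\n",
           L1_CACHE_BYTES, ALIGN_UP(100UL));
    return 0;
}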
7496diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7497index 3391d06..c23a2cc 100644
7498--- a/arch/parisc/include/asm/elf.h
7499+++ b/arch/parisc/include/asm/elf.h
7500@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7501
7502 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7503
7504+#ifdef CONFIG_PAX_ASLR
7505+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7506+
7507+#define PAX_DELTA_MMAP_LEN 16
7508+#define PAX_DELTA_STACK_LEN 16
7509+#endif
7510+
7511 /* This yields a mask that user programs can use to figure out what
7512 instruction set this CPU supports. This could be done in user space,
7513 but it's not easy, and we've already done it here. */
7514diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7515index f213f5b..0af3e8e 100644
7516--- a/arch/parisc/include/asm/pgalloc.h
7517+++ b/arch/parisc/include/asm/pgalloc.h
7518@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7519 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7520 }
7521
7522+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7523+{
7524+ pgd_populate(mm, pgd, pmd);
7525+}
7526+
7527 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7528 {
7529 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7530@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7531 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7532 #define pmd_free(mm, x) do { } while (0)
7533 #define pgd_populate(mm, pmd, pte) BUG()
7534+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7535
7536 #endif
7537
7538diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7539index 22b89d1..ce34230 100644
7540--- a/arch/parisc/include/asm/pgtable.h
7541+++ b/arch/parisc/include/asm/pgtable.h
7542@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7543 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7544 #define PAGE_COPY PAGE_EXECREAD
7545 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7546+
7547+#ifdef CONFIG_PAX_PAGEEXEC
7548+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7549+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7550+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7551+#else
7552+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7553+# define PAGE_COPY_NOEXEC PAGE_COPY
7554+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7555+#endif
7556+
7557 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7558 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7559 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7560diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7561index 4006964..fcb3cc2 100644
7562--- a/arch/parisc/include/asm/uaccess.h
7563+++ b/arch/parisc/include/asm/uaccess.h
7564@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7565 const void __user *from,
7566 unsigned long n)
7567 {
7568- int sz = __compiletime_object_size(to);
7569+ size_t sz = __compiletime_object_size(to);
7570 int ret = -EFAULT;
7571
7572- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7573+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7574 ret = __copy_from_user(to, from, n);
7575 else
7576 copy_from_user_overflow();
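The uaccess.h hunk above widens `sz` from int to size_t: __compiletime_object_size() reports "size unknown" as (size_t)-1, and keeping that in a signed int both truncates genuinely large object sizes on 64-bit and drags sign conversion into the `sz >= n` bounds test. The corrected comparison, spelled out in a userspace sketch:

/* The sentinel and the bounds test, kept unsigned throughout. */
#include <stdio.h>

int main(void)
{
    size_t unknown = (size_t)-1;        /* "size not known at compile time" */
    size_t sz = 64;                      /* known destination size */
    size_t n  = 128;                     /* requested copy length */

    if (sz == unknown || sz >= n)
        puts("copy allowed");
    else
        puts("overflow rejected");       /* 64 < 128: reject */
    return 0;
}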
7577diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7578index 50dfafc..b9fc230 100644
7579--- a/arch/parisc/kernel/module.c
7580+++ b/arch/parisc/kernel/module.c
7581@@ -98,16 +98,38 @@
7582
7583 /* three functions to determine where in the module core
7584 * or init pieces the location is */
7585+static inline int in_init_rx(struct module *me, void *loc)
7586+{
7587+ return (loc >= me->module_init_rx &&
7588+ loc < (me->module_init_rx + me->init_size_rx));
7589+}
7590+
7591+static inline int in_init_rw(struct module *me, void *loc)
7592+{
7593+ return (loc >= me->module_init_rw &&
7594+ loc < (me->module_init_rw + me->init_size_rw));
7595+}
7596+
7597 static inline int in_init(struct module *me, void *loc)
7598 {
7599- return (loc >= me->module_init &&
7600- loc <= (me->module_init + me->init_size));
7601+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7602+}
7603+
7604+static inline int in_core_rx(struct module *me, void *loc)
7605+{
7606+ return (loc >= me->module_core_rx &&
7607+ loc < (me->module_core_rx + me->core_size_rx));
7608+}
7609+
7610+static inline int in_core_rw(struct module *me, void *loc)
7611+{
7612+ return (loc >= me->module_core_rw &&
7613+ loc < (me->module_core_rw + me->core_size_rw));
7614 }
7615
7616 static inline int in_core(struct module *me, void *loc)
7617 {
7618- return (loc >= me->module_core &&
7619- loc <= (me->module_core + me->core_size));
7620+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7621 }
7622
7623 static inline int in_local(struct module *me, void *loc)
7624@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7625 }
7626
7627 /* align things a bit */
7628- me->core_size = ALIGN(me->core_size, 16);
7629- me->arch.got_offset = me->core_size;
7630- me->core_size += gots * sizeof(struct got_entry);
7631+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7632+ me->arch.got_offset = me->core_size_rw;
7633+ me->core_size_rw += gots * sizeof(struct got_entry);
7634
7635- me->core_size = ALIGN(me->core_size, 16);
7636- me->arch.fdesc_offset = me->core_size;
7637- me->core_size += fdescs * sizeof(Elf_Fdesc);
7638+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7639+ me->arch.fdesc_offset = me->core_size_rw;
7640+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7641
7642 me->arch.got_max = gots;
7643 me->arch.fdesc_max = fdescs;
7644@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7645
7646 BUG_ON(value == 0);
7647
7648- got = me->module_core + me->arch.got_offset;
7649+ got = me->module_core_rw + me->arch.got_offset;
7650 for (i = 0; got[i].addr; i++)
7651 if (got[i].addr == value)
7652 goto out;
7653@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7654 #ifdef CONFIG_64BIT
7655 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7656 {
7657- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7658+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7659
7660 if (!value) {
7661 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7662@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7663
7664 /* Create new one */
7665 fdesc->addr = value;
7666- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7667+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7668 return (Elf_Addr)fdesc;
7669 }
7670 #endif /* CONFIG_64BIT */
7671@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7672
7673 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7674 end = table + sechdrs[me->arch.unwind_section].sh_size;
7675- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7676+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7677
7678 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7679 me->arch.unwind_section, table, end, gp);
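The module.c hunk above reflects grsecurity's split of each module into a read-execute region (code) and a read-write region (data): membership in "the module" becomes the union of two range checks. Note it also tightens the upper bound from `<=` to `<` (the end address is one past the region, so the old inclusive test accepted one byte too many). A sketch of the split membership test:

/* Split-region membership with exclusive upper bounds. */
#include <stdbool.h>
#include <stdio.h>

struct module_layout {
    char *core_rx; unsigned long size_rx;
    char *core_rw; unsigned long size_rw;
};

static bool in_range(void *loc, char *base, unsigned long size)
{
    return (char *)loc >= base && (char *)loc < base + size;
}

static bool in_core(const struct module_layout *m, void *loc)
{
    return in_range(loc, m->core_rx, m->size_rx) ||
           in_range(loc, m->core_rw, m->size_rw);
}

int main(void)
{
    static char rx[4096], rw[4096];
    struct module_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

    printf("%d %d\n", in_core(&m, rx + 10), in_core(&m, rx + 4096));
    return 0;
}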
7680diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7681index 31ffa9b..588a798 100644
7682--- a/arch/parisc/kernel/sys_parisc.c
7683+++ b/arch/parisc/kernel/sys_parisc.c
7684@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7685 unsigned long task_size = TASK_SIZE;
7686 int do_color_align, last_mmap;
7687 struct vm_unmapped_area_info info;
7688+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7689
7690 if (len > task_size)
7691 return -ENOMEM;
7692@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7693 goto found_addr;
7694 }
7695
7696+#ifdef CONFIG_PAX_RANDMMAP
7697+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7698+#endif
7699+
7700 if (addr) {
7701 if (do_color_align && last_mmap)
7702 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7703@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7704 info.high_limit = mmap_upper_limit();
7705 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7706 info.align_offset = shared_align_offset(last_mmap, pgoff);
7707+ info.threadstack_offset = offset;
7708 addr = vm_unmapped_area(&info);
7709
7710 found_addr:
7711@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7712 unsigned long addr = addr0;
7713 int do_color_align, last_mmap;
7714 struct vm_unmapped_area_info info;
7715+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7716
7717 #ifdef CONFIG_64BIT
7718 /* This should only ever run for 32-bit processes. */
7719@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7720 }
7721
7722 /* requesting a specific address */
7723+#ifdef CONFIG_PAX_RANDMMAP
7724+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7725+#endif
7726+
7727 if (addr) {
7728 if (do_color_align && last_mmap)
7729 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7730@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7731 info.high_limit = mm->mmap_base;
7732 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7733 info.align_offset = shared_align_offset(last_mmap, pgoff);
7734+ info.threadstack_offset = offset;
7735 addr = vm_unmapped_area(&info);
7736 if (!(addr & ~PAGE_MASK))
7737 goto found_addr;
7738@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7739 mm->mmap_legacy_base = mmap_legacy_base();
7740 mm->mmap_base = mmap_upper_limit();
7741
7742+#ifdef CONFIG_PAX_RANDMMAP
7743+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7744+ mm->mmap_legacy_base += mm->delta_mmap;
7745+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7746+ }
7747+#endif
7748+
7749 if (mmap_is_legacy()) {
7750 mm->mmap_base = mm->mmap_legacy_base;
7751 mm->get_unmapped_area = arch_get_unmapped_area;
7752diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7753index 47ee620..1107387 100644
7754--- a/arch/parisc/kernel/traps.c
7755+++ b/arch/parisc/kernel/traps.c
7756@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7757
7758 down_read(&current->mm->mmap_sem);
7759 vma = find_vma(current->mm,regs->iaoq[0]);
7760- if (vma && (regs->iaoq[0] >= vma->vm_start)
7761- && (vma->vm_flags & VM_EXEC)) {
7762-
7763+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7764 fault_address = regs->iaoq[0];
7765 fault_space = regs->iasq[0];
7766
7767diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7768index d72197f..c017c84 100644
7769--- a/arch/parisc/mm/fault.c
7770+++ b/arch/parisc/mm/fault.c
7771@@ -15,6 +15,7 @@
7772 #include <linux/sched.h>
7773 #include <linux/interrupt.h>
7774 #include <linux/module.h>
7775+#include <linux/unistd.h>
7776
7777 #include <asm/uaccess.h>
7778 #include <asm/traps.h>
7779@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7780 static unsigned long
7781 parisc_acctyp(unsigned long code, unsigned int inst)
7782 {
7783- if (code == 6 || code == 16)
7784+ if (code == 6 || code == 7 || code == 16)
7785 return VM_EXEC;
7786
7787 switch (inst & 0xf0000000) {
7788@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7789 }
7790 #endif
7791
7792+#ifdef CONFIG_PAX_PAGEEXEC
7793+/*
7794+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7795+ *
7796+ * returns 1 when task should be killed
7797+ * 2 when rt_sigreturn trampoline was detected
7798+ * 3 when unpatched PLT trampoline was detected
7799+ */
7800+static int pax_handle_fetch_fault(struct pt_regs *regs)
7801+{
7802+
7803+#ifdef CONFIG_PAX_EMUPLT
7804+ int err;
7805+
7806+ do { /* PaX: unpatched PLT emulation */
7807+ unsigned int bl, depwi;
7808+
7809+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7810+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7811+
7812+ if (err)
7813+ break;
7814+
7815+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7816+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7817+
7818+ err = get_user(ldw, (unsigned int *)addr);
7819+ err |= get_user(bv, (unsigned int *)(addr+4));
7820+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7821+
7822+ if (err)
7823+ break;
7824+
7825+ if (ldw == 0x0E801096U &&
7826+ bv == 0xEAC0C000U &&
7827+ ldw2 == 0x0E881095U)
7828+ {
7829+ unsigned int resolver, map;
7830+
7831+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7832+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7833+ if (err)
7834+ break;
7835+
7836+ regs->gr[20] = instruction_pointer(regs)+8;
7837+ regs->gr[21] = map;
7838+ regs->gr[22] = resolver;
7839+ regs->iaoq[0] = resolver | 3UL;
7840+ regs->iaoq[1] = regs->iaoq[0] + 4;
7841+ return 3;
7842+ }
7843+ }
7844+ } while (0);
7845+#endif
7846+
7847+#ifdef CONFIG_PAX_EMUTRAMP
7848+
7849+#ifndef CONFIG_PAX_EMUSIGRT
7850+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7851+ return 1;
7852+#endif
7853+
7854+ do { /* PaX: rt_sigreturn emulation */
7855+ unsigned int ldi1, ldi2, bel, nop;
7856+
7857+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7858+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7859+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7860+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7861+
7862+ if (err)
7863+ break;
7864+
7865+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7866+ ldi2 == 0x3414015AU &&
7867+ bel == 0xE4008200U &&
7868+ nop == 0x08000240U)
7869+ {
7870+ regs->gr[25] = (ldi1 & 2) >> 1;
7871+ regs->gr[20] = __NR_rt_sigreturn;
7872+ regs->gr[31] = regs->iaoq[1] + 16;
7873+ regs->sr[0] = regs->iasq[1];
7874+ regs->iaoq[0] = 0x100UL;
7875+ regs->iaoq[1] = regs->iaoq[0] + 4;
7876+ regs->iasq[0] = regs->sr[2];
7877+ regs->iasq[1] = regs->sr[2];
7878+ return 2;
7879+ }
7880+ } while (0);
7881+#endif
7882+
7883+ return 1;
7884+}
7885+
7886+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7887+{
7888+ unsigned long i;
7889+
7890+ printk(KERN_ERR "PAX: bytes at PC: ");
7891+ for (i = 0; i < 5; i++) {
7892+ unsigned int c;
7893+ if (get_user(c, (unsigned int *)pc+i))
7894+ printk(KERN_CONT "???????? ");
7895+ else
7896+ printk(KERN_CONT "%08x ", c);
7897+ }
7898+ printk("\n");
7899+}
7900+#endif
7901+
7902 int fixup_exception(struct pt_regs *regs)
7903 {
7904 const struct exception_table_entry *fix;
7905@@ -234,8 +345,33 @@ retry:
7906
7907 good_area:
7908
7909- if ((vma->vm_flags & acc_type) != acc_type)
7910+ if ((vma->vm_flags & acc_type) != acc_type) {
7911+
7912+#ifdef CONFIG_PAX_PAGEEXEC
7913+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7914+ (address & ~3UL) == instruction_pointer(regs))
7915+ {
7916+ up_read(&mm->mmap_sem);
7917+ switch (pax_handle_fetch_fault(regs)) {
7918+
7919+#ifdef CONFIG_PAX_EMUPLT
7920+ case 3:
7921+ return;
7922+#endif
7923+
7924+#ifdef CONFIG_PAX_EMUTRAMP
7925+ case 2:
7926+ return;
7927+#endif
7928+
7929+ }
7930+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7931+ do_group_exit(SIGKILL);
7932+ }
7933+#endif
7934+
7935 goto bad_area;
7936+ }
7937
7938 /*
7939 * If for any reason at all we couldn't handle the fault, make
7940diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7941index 957bf34..3430cc8 100644
7942--- a/arch/powerpc/Kconfig
7943+++ b/arch/powerpc/Kconfig
7944@@ -393,6 +393,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7945 config KEXEC
7946 bool "kexec system call"
7947 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7948+ depends on !GRKERNSEC_KMEM
7949 help
7950 kexec is a system call that implements the ability to shutdown your
7951 current kernel, and to start another kernel. It is like a reboot
7952diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7953index e3b1d41..8e81edf 100644
7954--- a/arch/powerpc/include/asm/atomic.h
7955+++ b/arch/powerpc/include/asm/atomic.h
7956@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7957 return t1;
7958 }
7959
7960+#define atomic64_read_unchecked(v) atomic64_read(v)
7961+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7962+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7963+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7964+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7965+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7966+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7967+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7968+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7969+
7970 #endif /* __powerpc64__ */
7971
7972 #endif /* __KERNEL__ */
7973diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
7974index f89da80..7f5b05a 100644
7975--- a/arch/powerpc/include/asm/barrier.h
7976+++ b/arch/powerpc/include/asm/barrier.h
7977@@ -73,7 +73,7 @@
7978 do { \
7979 compiletime_assert_atomic_type(*p); \
7980 __lwsync(); \
7981- ACCESS_ONCE(*p) = (v); \
7982+ ACCESS_ONCE_RW(*p) = (v); \
7983 } while (0)
7984
7985 #define smp_load_acquire(p) \
7986diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7987index ed0afc1..0332825 100644
7988--- a/arch/powerpc/include/asm/cache.h
7989+++ b/arch/powerpc/include/asm/cache.h
7990@@ -3,6 +3,7 @@
7991
7992 #ifdef __KERNEL__
7993
7994+#include <linux/const.h>
7995
7996 /* bytes per L1 cache line */
7997 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7998@@ -22,7 +23,7 @@
7999 #define L1_CACHE_SHIFT 7
8000 #endif
8001
8002-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8003+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8004
8005 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8006
8007diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8008index 935b5e7..7001d2d 100644
8009--- a/arch/powerpc/include/asm/elf.h
8010+++ b/arch/powerpc/include/asm/elf.h
8011@@ -28,8 +28,19 @@
8012 the loader. We need to make sure that it is out of the way of the program
8013 that it will "exec", and that there is sufficient room for the brk. */
8014
8015-extern unsigned long randomize_et_dyn(unsigned long base);
8016-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8017+#define ELF_ET_DYN_BASE (0x20000000)
8018+
8019+#ifdef CONFIG_PAX_ASLR
8020+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8021+
8022+#ifdef __powerpc64__
8023+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8024+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8025+#else
8026+#define PAX_DELTA_MMAP_LEN 15
8027+#define PAX_DELTA_STACK_LEN 15
8028+#endif
8029+#endif
8030
8031 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8032
8033@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8034 (0x7ff >> (PAGE_SHIFT - 12)) : \
8035 (0x3ffff >> (PAGE_SHIFT - 12)))
8036
8037-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8038-#define arch_randomize_brk arch_randomize_brk
8039-
8040-
8041 #ifdef CONFIG_SPU_BASE
8042 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8043 #define NT_SPU 1
8044diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8045index 8196e9c..d83a9f3 100644
8046--- a/arch/powerpc/include/asm/exec.h
8047+++ b/arch/powerpc/include/asm/exec.h
8048@@ -4,6 +4,6 @@
8049 #ifndef _ASM_POWERPC_EXEC_H
8050 #define _ASM_POWERPC_EXEC_H
8051
8052-extern unsigned long arch_align_stack(unsigned long sp);
8053+#define arch_align_stack(x) ((x) & ~0xfUL)
8054
8055 #endif /* _ASM_POWERPC_EXEC_H */
8056diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8057index 5acabbd..7ea14fa 100644
8058--- a/arch/powerpc/include/asm/kmap_types.h
8059+++ b/arch/powerpc/include/asm/kmap_types.h
8060@@ -10,7 +10,7 @@
8061 * 2 of the License, or (at your option) any later version.
8062 */
8063
8064-#define KM_TYPE_NR 16
8065+#define KM_TYPE_NR 17
8066
8067 #endif /* __KERNEL__ */
8068 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8069diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8070index b8da913..60b608a 100644
8071--- a/arch/powerpc/include/asm/local.h
8072+++ b/arch/powerpc/include/asm/local.h
8073@@ -9,15 +9,26 @@ typedef struct
8074 atomic_long_t a;
8075 } local_t;
8076
8077+typedef struct
8078+{
8079+ atomic_long_unchecked_t a;
8080+} local_unchecked_t;
8081+
8082 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8083
8084 #define local_read(l) atomic_long_read(&(l)->a)
8085+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8086 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8087+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8088
8089 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8090+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8091 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8092+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8093 #define local_inc(l) atomic_long_inc(&(l)->a)
8094+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8095 #define local_dec(l) atomic_long_dec(&(l)->a)
8096+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8097
8098 static __inline__ long local_add_return(long a, local_t *l)
8099 {
8100@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8101
8102 return t;
8103 }
8104+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8105
8106 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8107
8108@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8109
8110 return t;
8111 }
8112+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8113
8114 static __inline__ long local_inc_return(local_t *l)
8115 {
8116@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8117
8118 #define local_cmpxchg(l, o, n) \
8119 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8120+#define local_cmpxchg_unchecked(l, o, n) \
8121+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8122 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8123
8124 /**
8125diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8126index 8565c25..2865190 100644
8127--- a/arch/powerpc/include/asm/mman.h
8128+++ b/arch/powerpc/include/asm/mman.h
8129@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8130 }
8131 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8132
8133-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8134+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8135 {
8136 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8137 }
8138diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8139index 32e4e21..62afb12 100644
8140--- a/arch/powerpc/include/asm/page.h
8141+++ b/arch/powerpc/include/asm/page.h
8142@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8143 * and needs to be executable. This means the whole heap ends
8144 * up being executable.
8145 */
8146-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8147- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8148+#define VM_DATA_DEFAULT_FLAGS32 \
8149+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8150+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8151
8152 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8153 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8154@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8155 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8156 #endif
8157
8158+#define ktla_ktva(addr) (addr)
8159+#define ktva_ktla(addr) (addr)
8160+
8161 #ifndef CONFIG_PPC_BOOK3S_64
8162 /*
8163 * Use the top bit of the higher-level page table entries to indicate whether
8164diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8165index 88693ce..ac6f9ab 100644
8166--- a/arch/powerpc/include/asm/page_64.h
8167+++ b/arch/powerpc/include/asm/page_64.h
8168@@ -153,15 +153,18 @@ do { \
8169 * stack by default, so in the absence of a PT_GNU_STACK program header
8170 * we turn execute permission off.
8171 */
8172-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8173- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8174+#define VM_STACK_DEFAULT_FLAGS32 \
8175+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8176+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8177
8178 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8179 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8180
8181+#ifndef CONFIG_PAX_PAGEEXEC
8182 #define VM_STACK_DEFAULT_FLAGS \
8183 (is_32bit_task() ? \
8184 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8185+#endif
8186
8187 #include <asm-generic/getorder.h>
8188
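The VM_DATA_DEFAULT_FLAGS32 and VM_STACK_DEFAULT_FLAGS32 hunks above make VM_EXEC conditional: instead of every 32-bit task getting executable data and stack mappings by default, only tasks whose personality carries READ_IMPLIES_EXEC (legacy binaries that declare they need it) do. A sketch of the conditional default; the flag values below are illustrative rather than the kernel's exact constants:

/* Personality-gated default mapping flags. */
#include <stdio.h>

#define VM_READ           0x1UL
#define VM_WRITE          0x2UL
#define VM_EXEC           0x4UL
#define READ_IMPLIES_EXEC 0x0400000UL   /* assumed value */

static unsigned long vm_data_default(unsigned long personality)
{
    return ((personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |
           VM_READ | VM_WRITE;
}

int main(void)
{
    printf("modern: %#lx, legacy: %#lx\n",
           vm_data_default(0), vm_data_default(READ_IMPLIES_EXEC));
    return 0;
}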
8189diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8190index 4b0be20..c15a27d 100644
8191--- a/arch/powerpc/include/asm/pgalloc-64.h
8192+++ b/arch/powerpc/include/asm/pgalloc-64.h
8193@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8194 #ifndef CONFIG_PPC_64K_PAGES
8195
8196 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8197+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8198
8199 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8200 {
8201@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8202 pud_set(pud, (unsigned long)pmd);
8203 }
8204
8205+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8206+{
8207+ pud_populate(mm, pud, pmd);
8208+}
8209+
8210 #define pmd_populate(mm, pmd, pte_page) \
8211 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8212 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8213@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8214 #endif
8215
8216 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8217+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8218
8219 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8220 pte_t *pte)
8221diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8222index 3ebb188..e17dddf 100644
8223--- a/arch/powerpc/include/asm/pgtable.h
8224+++ b/arch/powerpc/include/asm/pgtable.h
8225@@ -2,6 +2,7 @@
8226 #define _ASM_POWERPC_PGTABLE_H
8227 #ifdef __KERNEL__
8228
8229+#include <linux/const.h>
8230 #ifndef __ASSEMBLY__
8231 #include <linux/mmdebug.h>
8232 #include <asm/processor.h> /* For TASK_SIZE */
8233diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8234index 4aad413..85d86bf 100644
8235--- a/arch/powerpc/include/asm/pte-hash32.h
8236+++ b/arch/powerpc/include/asm/pte-hash32.h
8237@@ -21,6 +21,7 @@
8238 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8239 #define _PAGE_USER 0x004 /* usermode access allowed */
8240 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8241+#define _PAGE_EXEC _PAGE_GUARDED
8242 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8243 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8244 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8245diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8246index ce17815..c5574cc 100644
8247--- a/arch/powerpc/include/asm/reg.h
8248+++ b/arch/powerpc/include/asm/reg.h
8249@@ -249,6 +249,7 @@
8250 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8251 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8252 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8253+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8254 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8255 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8256 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8257diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8258index 084e080..9415a3d 100644
8259--- a/arch/powerpc/include/asm/smp.h
8260+++ b/arch/powerpc/include/asm/smp.h
8261@@ -51,7 +51,7 @@ struct smp_ops_t {
8262 int (*cpu_disable)(void);
8263 void (*cpu_die)(unsigned int nr);
8264 int (*cpu_bootable)(unsigned int nr);
8265-};
8266+} __no_const;
8267
8268 extern void smp_send_debugger_break(void);
8269 extern void start_secondary_resume(void);
8270diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8271index b034ecd..af7e31f 100644
8272--- a/arch/powerpc/include/asm/thread_info.h
8273+++ b/arch/powerpc/include/asm/thread_info.h
8274@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)

8275 #if defined(CONFIG_PPC64)
8276 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8277 #endif
8278+/* mask must be expressible within 16 bits to satisfy 'andi' instruction requirements */
8279+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8280
8281 /* as above, but as bit values */
8282 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8283@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8284 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8285 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8286 #define _TIF_NOHZ (1<<TIF_NOHZ)
8287+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8288 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8289 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8290- _TIF_NOHZ)
8291+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8292
8293 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8294 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8295diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8296index 9485b43..3bd3c16 100644
8297--- a/arch/powerpc/include/asm/uaccess.h
8298+++ b/arch/powerpc/include/asm/uaccess.h
8299@@ -58,6 +58,7 @@
8300
8301 #endif
8302
8303+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8304 #define access_ok(type, addr, size) \
8305 (__chk_user_ptr(addr), \
8306 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8307@@ -318,52 +319,6 @@ do { \
8308 extern unsigned long __copy_tofrom_user(void __user *to,
8309 const void __user *from, unsigned long size);
8310
8311-#ifndef __powerpc64__
8312-
8313-static inline unsigned long copy_from_user(void *to,
8314- const void __user *from, unsigned long n)
8315-{
8316- unsigned long over;
8317-
8318- if (access_ok(VERIFY_READ, from, n))
8319- return __copy_tofrom_user((__force void __user *)to, from, n);
8320- if ((unsigned long)from < TASK_SIZE) {
8321- over = (unsigned long)from + n - TASK_SIZE;
8322- return __copy_tofrom_user((__force void __user *)to, from,
8323- n - over) + over;
8324- }
8325- return n;
8326-}
8327-
8328-static inline unsigned long copy_to_user(void __user *to,
8329- const void *from, unsigned long n)
8330-{
8331- unsigned long over;
8332-
8333- if (access_ok(VERIFY_WRITE, to, n))
8334- return __copy_tofrom_user(to, (__force void __user *)from, n);
8335- if ((unsigned long)to < TASK_SIZE) {
8336- over = (unsigned long)to + n - TASK_SIZE;
8337- return __copy_tofrom_user(to, (__force void __user *)from,
8338- n - over) + over;
8339- }
8340- return n;
8341-}
8342-
8343-#else /* __powerpc64__ */
8344-
8345-#define __copy_in_user(to, from, size) \
8346- __copy_tofrom_user((to), (from), (size))
8347-
8348-extern unsigned long copy_from_user(void *to, const void __user *from,
8349- unsigned long n);
8350-extern unsigned long copy_to_user(void __user *to, const void *from,
8351- unsigned long n);
8352-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8353- unsigned long n);
8354-
8355-#endif /* __powerpc64__ */
8356-
8357 static inline unsigned long __copy_from_user_inatomic(void *to,
8358 const void __user *from, unsigned long n)
8359 {
8360@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8361 if (ret == 0)
8362 return 0;
8363 }
8364+
8365+ if (!__builtin_constant_p(n))
8366+ check_object_size(to, n, false);
8367+
8368 return __copy_tofrom_user((__force void __user *)to, from, n);
8369 }
8370
8371@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8372 if (ret == 0)
8373 return 0;
8374 }
8375+
8376+ if (!__builtin_constant_p(n))
8377+ check_object_size(from, n, true);
8378+
8379 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8380 }
8381
8382@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8383 return __copy_to_user_inatomic(to, from, size);
8384 }
8385
8386+#ifndef __powerpc64__
8387+
8388+static inline unsigned long __must_check copy_from_user(void *to,
8389+ const void __user *from, unsigned long n)
8390+{
8391+ unsigned long over;
8392+
8393+ if ((long)n < 0)
8394+ return n;
8395+
8396+ if (access_ok(VERIFY_READ, from, n)) {
8397+ if (!__builtin_constant_p(n))
8398+ check_object_size(to, n, false);
8399+ return __copy_tofrom_user((__force void __user *)to, from, n);
8400+ }
8401+ if ((unsigned long)from < TASK_SIZE) {
8402+ over = (unsigned long)from + n - TASK_SIZE;
8403+ if (!__builtin_constant_p(n - over))
8404+ check_object_size(to, n - over, false);
8405+ return __copy_tofrom_user((__force void __user *)to, from,
8406+ n - over) + over;
8407+ }
8408+ return n;
8409+}
8410+
8411+static inline unsigned long __must_check copy_to_user(void __user *to,
8412+ const void *from, unsigned long n)
8413+{
8414+ unsigned long over;
8415+
8416+ if ((long)n < 0)
8417+ return n;
8418+
8419+ if (access_ok(VERIFY_WRITE, to, n)) {
8420+ if (!__builtin_constant_p(n))
8421+ check_object_size(from, n, true);
8422+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8423+ }
8424+ if ((unsigned long)to < TASK_SIZE) {
8425+ over = (unsigned long)to + n - TASK_SIZE;
8426+ if (!__builtin_constant_p(n))
8427+ check_object_size(from, n - over, true);
8428+ return __copy_tofrom_user(to, (__force void __user *)from,
8429+ n - over) + over;
8430+ }
8431+ return n;
8432+}
8433+
8434+#else /* __powerpc64__ */
8435+
8436+#define __copy_in_user(to, from, size) \
8437+ __copy_tofrom_user((to), (from), (size))
8438+
8439+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8440+{
8441+ if ((long)n < 0 || n > INT_MAX)
8442+ return n;
8443+
8444+ if (!__builtin_constant_p(n))
8445+ check_object_size(to, n, false);
8446+
8447+ if (likely(access_ok(VERIFY_READ, from, n)))
8448+ n = __copy_from_user(to, from, n);
8449+ else
8450+ memset(to, 0, n);
8451+ return n;
8452+}
8453+
8454+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8455+{
8456+ if ((long)n < 0 || n > INT_MAX)
8457+ return n;
8458+
8459+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8460+ if (!__builtin_constant_p(n))
8461+ check_object_size(from, n, true);
8462+ n = __copy_to_user(to, from, n);
8463+ }
8464+ return n;
8465+}
8466+
8467+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8468+ unsigned long n);
8469+
8470+#endif /* __powerpc64__ */
8471+
8472 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8473
8474 static inline unsigned long clear_user(void __user *addr, unsigned long size)
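The inlined copy_from_user()/copy_to_user() above reject absurd sizes ((long)n < 0, i.e. a likely length underflow) and run check_object_size() before any bytes move, so the usercopy check always sees the real, non-constant length. A minimal userspace model of that check ordering; check_object_size() and the copy are stand-ins, not the kernel implementations:

#include <stdio.h>
#include <string.h>

/* stand-in: the kernel version verifies 'ptr' lies inside one heap/stack object */
static void check_object_size(const void *ptr, unsigned long n, int to_user)
{
	(void)ptr; (void)to_user;
	fprintf(stderr, "checking %lu byte object\n", n);
}

static unsigned long copy_from_user_model(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)	/* huge size: likely an unchecked length underflow */
		return n;	/* report everything as uncopied */
	check_object_size(to, n, 0);
	memcpy(to, from, n);	/* the kernel uses __copy_tofrom_user() here */
	return 0;		/* 0 bytes left uncopied */
}

int main(void)
{
	char src[8] = "payload", dst[8];
	printf("left: %lu\n", copy_from_user_model(dst, src, sizeof(src)));
	printf("left: %lu\n", copy_from_user_model(dst, src, (unsigned long)-4));
	return 0;
}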
8475diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8476index fcc9a89..07be2bb 100644
8477--- a/arch/powerpc/kernel/Makefile
8478+++ b/arch/powerpc/kernel/Makefile
8479@@ -26,6 +26,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8480 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8481 endif
8482
8483+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8484+
8485 obj-y := cputable.o ptrace.o syscalls.o \
8486 irq.o align.o signal_32.o pmc.o vdso.o \
8487 process.o systbl.o idle.o \
8488diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8489index 063b65d..7a26e9d 100644
8490--- a/arch/powerpc/kernel/exceptions-64e.S
8491+++ b/arch/powerpc/kernel/exceptions-64e.S
8492@@ -771,6 +771,7 @@ storage_fault_common:
8493 std r14,_DAR(r1)
8494 std r15,_DSISR(r1)
8495 addi r3,r1,STACK_FRAME_OVERHEAD
8496+ bl .save_nvgprs
8497 mr r4,r14
8498 mr r5,r15
8499 ld r14,PACA_EXGEN+EX_R14(r13)
8500@@ -779,8 +780,7 @@ storage_fault_common:
8501 cmpdi r3,0
8502 bne- 1f
8503 b .ret_from_except_lite
8504-1: bl .save_nvgprs
8505- mr r5,r3
8506+1: mr r5,r3
8507 addi r3,r1,STACK_FRAME_OVERHEAD
8508 ld r4,_DAR(r1)
8509 bl .bad_page_fault
8510diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8511index 38d5073..f00af8d 100644
8512--- a/arch/powerpc/kernel/exceptions-64s.S
8513+++ b/arch/powerpc/kernel/exceptions-64s.S
8514@@ -1584,10 +1584,10 @@ handle_page_fault:
8515 11: ld r4,_DAR(r1)
8516 ld r5,_DSISR(r1)
8517 addi r3,r1,STACK_FRAME_OVERHEAD
8518+ bl .save_nvgprs
8519 bl .do_page_fault
8520 cmpdi r3,0
8521 beq+ 12f
8522- bl .save_nvgprs
8523 mr r5,r3
8524 addi r3,r1,STACK_FRAME_OVERHEAD
8525 lwz r4,_DAR(r1)
8526diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8527index 1d0848b..d74685f 100644
8528--- a/arch/powerpc/kernel/irq.c
8529+++ b/arch/powerpc/kernel/irq.c
8530@@ -447,6 +447,8 @@ void migrate_irqs(void)
8531 }
8532 #endif
8533
8534+extern void gr_handle_kernel_exploit(void);
8535+
8536 static inline void check_stack_overflow(void)
8537 {
8538 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8539@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void)
8540 printk("do_IRQ: stack overflow: %ld\n",
8541 sp - sizeof(struct thread_info));
8542 dump_stack();
8543+ gr_handle_kernel_exploit();
8544 }
8545 #endif
8546 }
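The irq.c hunk wires the existing stack-overflow diagnostic into grsecurity's exploit-response hook. A self-contained model of the pattern, with gr_handle_kernel_exploit() reduced to a stub (in grsecurity proper it can terminate the offending task group):

#include <stdio.h>

#define THREAD_SIZE	(16 * 1024)	/* illustrative kernel stack size */
#define WATERMARK	(2 * 1024)	/* warn when less than 2KB remains */

static void gr_handle_kernel_exploit(void)
{
	fprintf(stderr, "exploit response hook\n");
}

static void check_stack_overflow_model(unsigned long sp, unsigned long stack_base)
{
	long free_bytes = (long)(sp - stack_base);

	if (free_bytes < WATERMARK) {
		fprintf(stderr, "do_IRQ: stack overflow: %ld\n", free_bytes);
		gr_handle_kernel_exploit();
	}
}

int main(void)
{
	unsigned long base = 0x1000;

	check_stack_overflow_model(base + THREAD_SIZE / 2, base);	/* plenty left */
	check_stack_overflow_model(base + 512, base);			/* triggers hook */
	return 0;
}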
8547diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8548index 6cff040..74ac5d1 100644
8549--- a/arch/powerpc/kernel/module_32.c
8550+++ b/arch/powerpc/kernel/module_32.c
8551@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8552 me->arch.core_plt_section = i;
8553 }
8554 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8555- printk("Module doesn't contain .plt or .init.plt sections.\n");
8556+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8557 return -ENOEXEC;
8558 }
8559
8560@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
8561
8562 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8563 /* Init, or core PLT? */
8564- if (location >= mod->module_core
8565- && location < mod->module_core + mod->core_size)
8566+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8567+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8568 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8569- else
8570+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8571+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8572 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8573+ else {
8574+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8575+ return ~0UL;
8576+ }
8577
8578 /* Find this entry, or if that fails, the next avail. entry */
8579 while (entry->jump[0]) {
8580@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8581 }
8582 #ifdef CONFIG_DYNAMIC_FTRACE
8583 module->arch.tramp =
8584- do_plt_call(module->module_core,
8585+ do_plt_call(module->module_core_rx,
8586 (unsigned long)ftrace_caller,
8587 sechdrs, module);
8588 #endif
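The module_32.c change reflects the RX/RW module split: module_core and module_init each become an executable (rx) and a writable (rw) region, so do_plt_call() must classify the relocation site against four ranges and now has an explicit error path. A sketch of that classification, with all region values invented:

#include <stdio.h>

struct region { unsigned long base, size; };

static int in_region(unsigned long a, struct region r)
{
	return a >= r.base && a < r.base + r.size;
}

int main(void)
{
	struct region core_rx = { 0x1000, 0x800 }, core_rw = { 0x2000, 0x400 };
	struct region init_rx = { 0x3000, 0x200 }, init_rw = { 0x4000, 0x100 };
	unsigned long loc = 0x1100;

	if (in_region(loc, core_rx) || in_region(loc, core_rw))
		puts("use core PLT");
	else if (in_region(loc, init_rx) || in_region(loc, init_rw))
		puts("use init PLT");
	else
		puts("invalid R_PPC_REL24 entry");	/* the new error path */
	return 0;
}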
8589diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8590index 31d0215..206af70 100644
8591--- a/arch/powerpc/kernel/process.c
8592+++ b/arch/powerpc/kernel/process.c
8593@@ -1031,8 +1031,8 @@ void show_regs(struct pt_regs * regs)
8594 	 * Lookup NIP late so we have the best chance of getting the
8595 * above info out without failing
8596 */
8597- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8598- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8599+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8600+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8601 #endif
8602 show_stack(current, (unsigned long *) regs->gpr[1]);
8603 if (!user_mode(regs))
8604@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8605 newsp = stack[0];
8606 ip = stack[STACK_FRAME_LR_SAVE];
8607 if (!firstframe || ip != lr) {
8608- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8609+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8610 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8611 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8612- printk(" (%pS)",
8613+ printk(" (%pA)",
8614 (void *)current->ret_stack[curr_frame].ret);
8615 curr_frame--;
8616 }
8617@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8618 struct pt_regs *regs = (struct pt_regs *)
8619 (sp + STACK_FRAME_OVERHEAD);
8620 lr = regs->link;
8621- printk("--- Exception: %lx at %pS\n LR = %pS\n",
8622+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
8623 regs->trap, (void *)regs->nip, (void *)lr);
8624 firstframe = 1;
8625 }
8626@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
8627 mtspr(SPRN_CTRLT, ctrl);
8628 }
8629 #endif /* CONFIG_PPC64 */
8630-
8631-unsigned long arch_align_stack(unsigned long sp)
8632-{
8633- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8634- sp -= get_random_int() & ~PAGE_MASK;
8635- return sp & ~0xf;
8636-}
8637-
8638-static inline unsigned long brk_rnd(void)
8639-{
8640- unsigned long rnd = 0;
8641-
8642- /* 8MB for 32bit, 1GB for 64bit */
8643- if (is_32bit_task())
8644- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8645- else
8646- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8647-
8648- return rnd << PAGE_SHIFT;
8649-}
8650-
8651-unsigned long arch_randomize_brk(struct mm_struct *mm)
8652-{
8653- unsigned long base = mm->brk;
8654- unsigned long ret;
8655-
8656-#ifdef CONFIG_PPC_STD_MMU_64
8657- /*
8658- * If we are using 1TB segments and we are allowed to randomise
8659- * the heap, we can put it above 1TB so it is backed by a 1TB
8660- * segment. Otherwise the heap will be in the bottom 1TB
8661- * which always uses 256MB segments and this may result in a
8662- * performance penalty.
8663- */
8664- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8665- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8666-#endif
8667-
8668- ret = PAGE_ALIGN(base + brk_rnd());
8669-
8670- if (ret < mm->brk)
8671- return mm->brk;
8672-
8673- return ret;
8674-}
8675-
8676-unsigned long randomize_et_dyn(unsigned long base)
8677-{
8678- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8679-
8680- if (ret < base)
8681- return base;
8682-
8683- return ret;
8684-}
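The deleted arch_align_stack()/brk_rnd()/arch_randomize_brk() helpers go away, presumably because PaX's own ASLR (PAX_RANDMMAP and the exec-time deltas) takes over base randomization. For reference, a userspace model of what the removed brk randomization computed; rand32() stands in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long rand32(void) { return (unsigned long)rand(); }

static unsigned long brk_rnd(int is_32bit)
{
	unsigned long rnd = is_32bit
		? rand32() % (1UL << (23 - PAGE_SHIFT))		/* 8MB of entropy */
		: rand32() % (1UL << (30 - PAGE_SHIFT));	/* 1GB of entropy */
	return rnd << PAGE_SHIFT;
}

int main(void)
{
	unsigned long brk = 0x10000000UL;
	unsigned long r = PAGE_ALIGN(brk + brk_rnd(0));

	/* the removed code likewise refused to move brk downward */
	printf("brk 0x%lx randomized to 0x%lx\n", brk, r < brk ? brk : r);
	return 0;
}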
8685diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8686index 2e3d2bf..35df241 100644
8687--- a/arch/powerpc/kernel/ptrace.c
8688+++ b/arch/powerpc/kernel/ptrace.c
8689@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8690 return ret;
8691 }
8692
8693+#ifdef CONFIG_GRKERNSEC_SETXID
8694+extern void gr_delayed_cred_worker(void);
8695+#endif
8696+
8697 /*
8698 * We must return the syscall number to actually look up in the table.
8699 * This can be -1L to skip running any syscall at all.
8700@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8701
8702 secure_computing_strict(regs->gpr[0]);
8703
8704+#ifdef CONFIG_GRKERNSEC_SETXID
8705+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8706+ gr_delayed_cred_worker();
8707+#endif
8708+
8709 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8710 tracehook_report_syscall_entry(regs))
8711 /*
8712@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8713 {
8714 int step;
8715
8716+#ifdef CONFIG_GRKERNSEC_SETXID
8717+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8718+ gr_delayed_cred_worker();
8719+#endif
8720+
8721 audit_syscall_exit(regs);
8722
8723 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
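Both syscall trace paths gain the same guard: TIF_GRSEC_SETXID is atomically tested and cleared, and the deferred credential update runs only if it was set, so a setxid requested by another thread is applied at the next syscall boundary. A compilable model of the flag handshake (the flag machinery here is a plain userspace stand-in):

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint thread_flags;
#define TIF_GRSEC_SETXID (1u << 6)

static int test_and_clear_thread_flag(unsigned int flag)
{
	return atomic_fetch_and(&thread_flags, ~flag) & flag;
}

static void gr_delayed_cred_worker(void) { puts("applying deferred setuid()"); }

static void syscall_trace_enter(void)
{
	if (test_and_clear_thread_flag(TIF_GRSEC_SETXID))
		gr_delayed_cred_worker();
}

int main(void)
{
	atomic_fetch_or(&thread_flags, TIF_GRSEC_SETXID);	/* another thread queued work */
	syscall_trace_enter();	/* runs the worker once */
	syscall_trace_enter();	/* flag already clear: nothing to do */
	return 0;
}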
8724diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8725index 4e47db6..6dcc96e 100644
8726--- a/arch/powerpc/kernel/signal_32.c
8727+++ b/arch/powerpc/kernel/signal_32.c
8728@@ -1013,7 +1013,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8729 /* Save user registers on the stack */
8730 frame = &rt_sf->uc.uc_mcontext;
8731 addr = frame;
8732- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8733+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8734 sigret = 0;
8735 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8736 } else {
8737diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8738index d501dc4..e5a0de0 100644
8739--- a/arch/powerpc/kernel/signal_64.c
8740+++ b/arch/powerpc/kernel/signal_64.c
8741@@ -760,7 +760,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8742 current->thread.fp_state.fpscr = 0;
8743
8744 /* Set up to return from userspace. */
8745- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8746+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8747 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8748 } else {
8749 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8750diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8751index 33cd7a0..d615344 100644
8752--- a/arch/powerpc/kernel/traps.c
8753+++ b/arch/powerpc/kernel/traps.c
8754@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8755 return flags;
8756 }
8757
8758+extern void gr_handle_kernel_exploit(void);
8759+
8760 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8761 int signr)
8762 {
8763@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8764 panic("Fatal exception in interrupt");
8765 if (panic_on_oops)
8766 panic("Fatal exception");
8767+
8768+ gr_handle_kernel_exploit();
8769+
8770 do_exit(signr);
8771 }
8772
8773diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8774index 094e45c..d82b848 100644
8775--- a/arch/powerpc/kernel/vdso.c
8776+++ b/arch/powerpc/kernel/vdso.c
8777@@ -35,6 +35,7 @@
8778 #include <asm/vdso.h>
8779 #include <asm/vdso_datapage.h>
8780 #include <asm/setup.h>
8781+#include <asm/mman.h>
8782
8783 #undef DEBUG
8784
8785@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8786 vdso_base = VDSO32_MBASE;
8787 #endif
8788
8789- current->mm->context.vdso_base = 0;
8790+ current->mm->context.vdso_base = ~0UL;
8791
8792 /* vDSO has a problem and was disabled, just don't "enable" it for the
8793 * process
8794@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8795 vdso_base = get_unmapped_area(NULL, vdso_base,
8796 (vdso_pages << PAGE_SHIFT) +
8797 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8798- 0, 0);
8799+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8800 if (IS_ERR_VALUE(vdso_base)) {
8801 rc = vdso_base;
8802 goto fail_mmapsem;
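vdso.c now initializes context.vdso_base to ~0UL rather than 0, and the signal_32.c/signal_64.c hunks above test against that sentinel. The reasoning appears to be that once the vDSO base is randomized, 0 is no longer a safe "not mapped" marker, whereas ~0UL can never be a valid page-aligned base. A minimal sketch of the sentinel check:

#include <stdio.h>

static unsigned long vdso_base = ~0UL;	/* set at exec time if the vDSO maps */

static int have_vdso_trampoline(void)
{
	return vdso_base != ~0UL;	/* old code tested vdso_base != 0 */
}

int main(void)
{
	printf("vdso trampoline: %s\n", have_vdso_trampoline() ? "yes" : "no");
	vdso_base = 0x0;		/* hypothetical mapping at the zero page */
	printf("vdso trampoline: %s\n", have_vdso_trampoline() ? "yes" : "no");
	return 0;
}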
8803diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
8804index 3cf541a..ab2d825 100644
8805--- a/arch/powerpc/kvm/powerpc.c
8806+++ b/arch/powerpc/kvm/powerpc.c
8807@@ -1153,7 +1153,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
8808 }
8809 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
8810
8811-int kvm_arch_init(void *opaque)
8812+int kvm_arch_init(const void *opaque)
8813 {
8814 return 0;
8815 }
8816diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8817index 5eea6f3..5d10396 100644
8818--- a/arch/powerpc/lib/usercopy_64.c
8819+++ b/arch/powerpc/lib/usercopy_64.c
8820@@ -9,22 +9,6 @@
8821 #include <linux/module.h>
8822 #include <asm/uaccess.h>
8823
8824-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8825-{
8826- if (likely(access_ok(VERIFY_READ, from, n)))
8827- n = __copy_from_user(to, from, n);
8828- else
8829- memset(to, 0, n);
8830- return n;
8831-}
8832-
8833-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8834-{
8835- if (likely(access_ok(VERIFY_WRITE, to, n)))
8836- n = __copy_to_user(to, from, n);
8837- return n;
8838-}
8839-
8840 unsigned long copy_in_user(void __user *to, const void __user *from,
8841 unsigned long n)
8842 {
8843@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8844 return n;
8845 }
8846
8847-EXPORT_SYMBOL(copy_from_user);
8848-EXPORT_SYMBOL(copy_to_user);
8849 EXPORT_SYMBOL(copy_in_user);
8850
8851diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8852index 51ab9e7..7d3c78b 100644
8853--- a/arch/powerpc/mm/fault.c
8854+++ b/arch/powerpc/mm/fault.c
8855@@ -33,6 +33,10 @@
8856 #include <linux/magic.h>
8857 #include <linux/ratelimit.h>
8858 #include <linux/context_tracking.h>
8859+#include <linux/slab.h>
8860+#include <linux/pagemap.h>
8861+#include <linux/compiler.h>
8862+#include <linux/unistd.h>
8863
8864 #include <asm/firmware.h>
8865 #include <asm/page.h>
8866@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8867 }
8868 #endif
8869
8870+#ifdef CONFIG_PAX_PAGEEXEC
8871+/*
8872+ * PaX: decide what to do with offenders (regs->nip = fault address)
8873+ *
8874+ * returns 1 when task should be killed
8875+ */
8876+static int pax_handle_fetch_fault(struct pt_regs *regs)
8877+{
8878+ return 1;
8879+}
8880+
8881+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8882+{
8883+ unsigned long i;
8884+
8885+ printk(KERN_ERR "PAX: bytes at PC: ");
8886+ for (i = 0; i < 5; i++) {
8887+ unsigned int c;
8888+ if (get_user(c, (unsigned int __user *)pc+i))
8889+ printk(KERN_CONT "???????? ");
8890+ else
8891+ printk(KERN_CONT "%08x ", c);
8892+ }
8893+ printk("\n");
8894+}
8895+#endif
8896+
8897 /*
8898 * Check whether the instruction at regs->nip is a store using
8899 * an update addressing form which will update r1.
8900@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8901 * indicate errors in DSISR but can validly be set in SRR1.
8902 */
8903 if (trap == 0x400)
8904- error_code &= 0x48200000;
8905+ error_code &= 0x58200000;
8906 else
8907 is_write = error_code & DSISR_ISSTORE;
8908 #else
8909@@ -378,7 +409,7 @@ good_area:
8910 * "undefined". Of those that can be set, this is the only
8911 * one which seems bad.
8912 */
8913- if (error_code & 0x10000000)
8914+ if (error_code & DSISR_GUARDED)
8915 /* Guarded storage error. */
8916 goto bad_area;
8917 #endif /* CONFIG_8xx */
8918@@ -393,7 +424,7 @@ good_area:
8919 * processors use the same I/D cache coherency mechanism
8920 * as embedded.
8921 */
8922- if (error_code & DSISR_PROTFAULT)
8923+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8924 goto bad_area;
8925 #endif /* CONFIG_PPC_STD_MMU */
8926
8927@@ -483,6 +514,23 @@ bad_area:
8928 bad_area_nosemaphore:
8929 /* User mode accesses cause a SIGSEGV */
8930 if (user_mode(regs)) {
8931+
8932+#ifdef CONFIG_PAX_PAGEEXEC
8933+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8934+#ifdef CONFIG_PPC_STD_MMU
8935+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8936+#else
8937+ if (is_exec && regs->nip == address) {
8938+#endif
8939+ switch (pax_handle_fetch_fault(regs)) {
8940+ }
8941+
8942+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8943+ do_group_exit(SIGKILL);
8944+ }
8945+ }
8946+#endif
8947+
8948 _exception(SIGSEGV, regs, code, address);
8949 goto bail;
8950 }
8951diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8952index cb8bdbe..cde4bc7 100644
8953--- a/arch/powerpc/mm/mmap.c
8954+++ b/arch/powerpc/mm/mmap.c
8955@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
8956 return sysctl_legacy_va_layout;
8957 }
8958
8959-static unsigned long mmap_rnd(void)
8960+static unsigned long mmap_rnd(struct mm_struct *mm)
8961 {
8962 unsigned long rnd = 0;
8963
8964+#ifdef CONFIG_PAX_RANDMMAP
8965+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8966+#endif
8967+
8968 if (current->flags & PF_RANDOMIZE) {
8969 /* 8MB for 32bit, 1GB for 64bit */
8970 if (is_32bit_task())
8971@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
8972 return rnd << PAGE_SHIFT;
8973 }
8974
8975-static inline unsigned long mmap_base(void)
8976+static inline unsigned long mmap_base(struct mm_struct *mm)
8977 {
8978 unsigned long gap = rlimit(RLIMIT_STACK);
8979
8980@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
8981 else if (gap > MAX_GAP)
8982 gap = MAX_GAP;
8983
8984- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
8985+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
8986 }
8987
8988 /*
8989@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8990 */
8991 if (mmap_is_legacy()) {
8992 mm->mmap_base = TASK_UNMAPPED_BASE;
8993+
8994+#ifdef CONFIG_PAX_RANDMMAP
8995+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8996+ mm->mmap_base += mm->delta_mmap;
8997+#endif
8998+
8999 mm->get_unmapped_area = arch_get_unmapped_area;
9000 } else {
9001- mm->mmap_base = mmap_base();
9002+ mm->mmap_base = mmap_base(mm);
9003+
9004+#ifdef CONFIG_PAX_RANDMMAP
9005+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9006+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9007+#endif
9008+
9009 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9010 }
9011 }
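arch_pick_mmap_layout() now threads the mm through so PAX_RANDMMAP can shift the search base by the per-mm deltas: upward in the legacy bottom-up layout, downward (by delta_mmap + delta_stack) in the top-down layout. An illustrative computation with invented constants; the real deltas are chosen at execve() time:

#include <stdio.h>

#define TASK_SIZE		0x100000000UL
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)

int main(void)
{
	unsigned long gap = 0x800000UL;		/* stack rlimit, illustrative */
	unsigned long delta_mmap = 0x12000UL;	/* randomized at execve() */
	unsigned long delta_stack = 0x3000UL;
	int legacy = 0, randmmap = 1;

	unsigned long base = legacy ? TASK_UNMAPPED_BASE : TASK_SIZE - gap;
	if (randmmap)
		base = legacy ? base + delta_mmap
			      : base - delta_mmap - delta_stack;
	printf("mmap_base = 0x%lx\n", base);
	return 0;
}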
9012diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9013index b0c75cc..ef7fb93 100644
9014--- a/arch/powerpc/mm/slice.c
9015+++ b/arch/powerpc/mm/slice.c
9016@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9017 if ((mm->task_size - len) < addr)
9018 return 0;
9019 vma = find_vma(mm, addr);
9020- return (!vma || (addr + len) <= vma->vm_start);
9021+ return check_heap_stack_gap(vma, addr, len, 0);
9022 }
9023
9024 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9025@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9026 info.align_offset = 0;
9027
9028 addr = TASK_UNMAPPED_BASE;
9029+
9030+#ifdef CONFIG_PAX_RANDMMAP
9031+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9032+ addr += mm->delta_mmap;
9033+#endif
9034+
9035 while (addr < TASK_SIZE) {
9036 info.low_limit = addr;
9037 if (!slice_scan_available(addr, available, 1, &addr))
9038@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9039 if (fixed && addr > (mm->task_size - len))
9040 return -ENOMEM;
9041
9042+#ifdef CONFIG_PAX_RANDMMAP
9043+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9044+ addr = 0;
9045+#endif
9046+
9047 /* If hint, make sure it matches our alignment restrictions */
9048 if (!fixed && addr) {
9049 addr = _ALIGN_UP(addr, 1ul << pshift);
9050diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9051index 4278acf..67fd0e6 100644
9052--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9053+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9054@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9055 }
9056
9057 static struct pci_ops scc_pciex_pci_ops = {
9058- scc_pciex_read_config,
9059- scc_pciex_write_config,
9060+ .read = scc_pciex_read_config,
9061+ .write = scc_pciex_write_config,
9062 };
9063
9064 static void pciex_clear_intr_all(unsigned int __iomem *base)
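The celleb change is purely syntactic but deliberate: positional initializers for struct pci_ops become designated ones, which keeps each function bound to the right member if the struct is ever reordered or extended, and plays well with grsecurity's constification of ops structures. A self-contained illustration:

#include <stdio.h>

struct pci_ops_model {
	int (*read)(int reg);
	int (*write)(int reg, int val);
};

static int my_read(int reg)           { return reg; }
static int my_write(int reg, int val) { (void)reg; return val; }

static const struct pci_ops_model ops = {
	.read  = my_read,	/* survives member reordering */
	.write = my_write,
};

int main(void)
{
	printf("%d %d\n", ops.read(1), ops.write(2, 3));
	return 0;
}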
9065diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9066index 9098692..3d54cd1 100644
9067--- a/arch/powerpc/platforms/cell/spufs/file.c
9068+++ b/arch/powerpc/platforms/cell/spufs/file.c
9069@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9070 return VM_FAULT_NOPAGE;
9071 }
9072
9073-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9074+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9075 unsigned long address,
9076- void *buf, int len, int write)
9077+ void *buf, size_t len, int write)
9078 {
9079 struct spu_context *ctx = vma->vm_file->private_data;
9080 unsigned long offset = address - vma->vm_start;
9081diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9082index 1d47061..0714963 100644
9083--- a/arch/s390/include/asm/atomic.h
9084+++ b/arch/s390/include/asm/atomic.h
9085@@ -412,6 +412,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9086 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9087 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9088
9089+#define atomic64_read_unchecked(v) atomic64_read(v)
9090+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9091+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9092+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9093+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9094+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9095+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9096+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9097+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9098+
9099 #define smp_mb__before_atomic_dec() smp_mb()
9100 #define smp_mb__after_atomic_dec() smp_mb()
9101 #define smp_mb__before_atomic_inc() smp_mb()
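s390 gets the *_unchecked atomic64 API as plain aliases: this patch adds no PAX_REFCOUNT instrumentation on s390, so checked and unchecked variants coincide, but shared code that opts specific counters out of overflow checking still compiles. A model of the split, with the checked side simulating a trap on overflow:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)	/* REFCOUNT-checked model */
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow: killing task\n");
		exit(1);
	}
	v->counter++;
}

/* no overflow check: wraparound is acceptable here (e.g. statistics) */
#define atomic_inc_unchecked(v) ((v)->counter++)

int main(void)
{
	atomic_t ref = { INT_MAX - 1 };
	atomic_unchecked_t stat = { 41 };

	atomic_inc(&ref);		/* fine: one step below the limit */
	atomic_inc_unchecked(&stat);	/* never trapped, by design */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}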
9102diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9103index 578680f..0eb3b11 100644
9104--- a/arch/s390/include/asm/barrier.h
9105+++ b/arch/s390/include/asm/barrier.h
9106@@ -36,7 +36,7 @@
9107 do { \
9108 compiletime_assert_atomic_type(*p); \
9109 barrier(); \
9110- ACCESS_ONCE(*p) = (v); \
9111+ ACCESS_ONCE_RW(*p) = (v); \
9112 } while (0)
9113
9114 #define smp_load_acquire(p) \
9115diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9116index 4d7ccac..d03d0ad 100644
9117--- a/arch/s390/include/asm/cache.h
9118+++ b/arch/s390/include/asm/cache.h
9119@@ -9,8 +9,10 @@
9120 #ifndef __ARCH_S390_CACHE_H
9121 #define __ARCH_S390_CACHE_H
9122
9123-#define L1_CACHE_BYTES 256
9124+#include <linux/const.h>
9125+
9126 #define L1_CACHE_SHIFT 8
9127+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9128 #define NET_SKB_PAD 32
9129
9130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9131diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9132index 78f4f87..598ce39 100644
9133--- a/arch/s390/include/asm/elf.h
9134+++ b/arch/s390/include/asm/elf.h
9135@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9136 the loader. We need to make sure that it is out of the way of the program
9137 that it will "exec", and that there is sufficient room for the brk. */
9138
9139-extern unsigned long randomize_et_dyn(unsigned long base);
9140-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9141+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9142+
9143+#ifdef CONFIG_PAX_ASLR
9144+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9145+
9146+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9147+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9148+#endif
9149
9150 /* This yields a mask that user programs can use to figure out what
9151 instruction set this CPU supports. */
9152@@ -222,9 +228,6 @@ struct linux_binprm;
9153 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9154 int arch_setup_additional_pages(struct linux_binprm *, int);
9155
9156-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9157-#define arch_randomize_brk arch_randomize_brk
9158-
9159 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9160
9161 #endif
9162diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9163index c4a93d6..4d2a9b4 100644
9164--- a/arch/s390/include/asm/exec.h
9165+++ b/arch/s390/include/asm/exec.h
9166@@ -7,6 +7,6 @@
9167 #ifndef __ASM_EXEC_H
9168 #define __ASM_EXEC_H
9169
9170-extern unsigned long arch_align_stack(unsigned long sp);
9171+#define arch_align_stack(x) ((x) & ~0xfUL)
9172
9173 #endif /* __ASM_EXEC_H */
9174diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9175index 79330af..254cf37 100644
9176--- a/arch/s390/include/asm/uaccess.h
9177+++ b/arch/s390/include/asm/uaccess.h
9178@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9179 __range_ok((unsigned long)(addr), (size)); \
9180 })
9181
9182+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9183 #define access_ok(type, addr, size) __access_ok(addr, size)
9184
9185 /*
9186@@ -245,6 +246,10 @@ static inline unsigned long __must_check
9187 copy_to_user(void __user *to, const void *from, unsigned long n)
9188 {
9189 might_fault();
9190+
9191+ if ((long)n < 0)
9192+ return n;
9193+
9194 return __copy_to_user(to, from, n);
9195 }
9196
9197@@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
9198 static inline unsigned long __must_check
9199 __copy_from_user(void *to, const void __user *from, unsigned long n)
9200 {
9201+ if ((long)n < 0)
9202+ return n;
9203+
9204 return uaccess.copy_from_user(n, from, to);
9205 }
9206
9207@@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9208 static inline unsigned long __must_check
9209 copy_from_user(void *to, const void __user *from, unsigned long n)
9210 {
9211- unsigned int sz = __compiletime_object_size(to);
9212+ size_t sz = __compiletime_object_size(to);
9213
9214 might_fault();
9215- if (unlikely(sz != -1 && sz < n)) {
9216+
9217+ if ((long)n < 0)
9218+ return n;
9219+
9220+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9221 copy_from_user_overflow();
9222 return n;
9223 }
9224diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9225index b89b591..fd9609d 100644
9226--- a/arch/s390/kernel/module.c
9227+++ b/arch/s390/kernel/module.c
9228@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9229
9230 /* Increase core size by size of got & plt and set start
9231 offsets for got and plt. */
9232- me->core_size = ALIGN(me->core_size, 4);
9233- me->arch.got_offset = me->core_size;
9234- me->core_size += me->arch.got_size;
9235- me->arch.plt_offset = me->core_size;
9236- me->core_size += me->arch.plt_size;
9237+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9238+ me->arch.got_offset = me->core_size_rw;
9239+ me->core_size_rw += me->arch.got_size;
9240+ me->arch.plt_offset = me->core_size_rx;
9241+ me->core_size_rx += me->arch.plt_size;
9242 return 0;
9243 }
9244
9245@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9246 if (info->got_initialized == 0) {
9247 Elf_Addr *gotent;
9248
9249- gotent = me->module_core + me->arch.got_offset +
9250+ gotent = me->module_core_rw + me->arch.got_offset +
9251 info->got_offset;
9252 *gotent = val;
9253 info->got_initialized = 1;
9254@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9255 rc = apply_rela_bits(loc, val, 0, 64, 0);
9256 else if (r_type == R_390_GOTENT ||
9257 r_type == R_390_GOTPLTENT) {
9258- val += (Elf_Addr) me->module_core - loc;
9259+ val += (Elf_Addr) me->module_core_rw - loc;
9260 rc = apply_rela_bits(loc, val, 1, 32, 1);
9261 }
9262 break;
9263@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9264 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9265 if (info->plt_initialized == 0) {
9266 unsigned int *ip;
9267- ip = me->module_core + me->arch.plt_offset +
9268+ ip = me->module_core_rx + me->arch.plt_offset +
9269 info->plt_offset;
9270 #ifndef CONFIG_64BIT
9271 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9272@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9273 val - loc + 0xffffUL < 0x1ffffeUL) ||
9274 (r_type == R_390_PLT32DBL &&
9275 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9276- val = (Elf_Addr) me->module_core +
9277+ val = (Elf_Addr) me->module_core_rx +
9278 me->arch.plt_offset +
9279 info->plt_offset;
9280 val += rela->r_addend - loc;
9281@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9282 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9283 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9284 val = val + rela->r_addend -
9285- ((Elf_Addr) me->module_core + me->arch.got_offset);
9286+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9287 if (r_type == R_390_GOTOFF16)
9288 rc = apply_rela_bits(loc, val, 0, 16, 0);
9289 else if (r_type == R_390_GOTOFF32)
9290@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9291 break;
9292 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9293 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9294- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9295+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9296 rela->r_addend - loc;
9297 if (r_type == R_390_GOTPC)
9298 rc = apply_rela_bits(loc, val, 1, 32, 0);
9299diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9300index dd14532..1dfc145 100644
9301--- a/arch/s390/kernel/process.c
9302+++ b/arch/s390/kernel/process.c
9303@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9304 }
9305 return 0;
9306 }
9307-
9308-unsigned long arch_align_stack(unsigned long sp)
9309-{
9310- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9311- sp -= get_random_int() & ~PAGE_MASK;
9312- return sp & ~0xf;
9313-}
9314-
9315-static inline unsigned long brk_rnd(void)
9316-{
9317- /* 8MB for 32bit, 1GB for 64bit */
9318- if (is_32bit_task())
9319- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9320- else
9321- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9322-}
9323-
9324-unsigned long arch_randomize_brk(struct mm_struct *mm)
9325-{
9326- unsigned long ret;
9327-
9328- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9329- return (ret > mm->brk) ? ret : mm->brk;
9330-}
9331-
9332-unsigned long randomize_et_dyn(unsigned long base)
9333-{
9334- unsigned long ret;
9335-
9336- if (!(current->flags & PF_RANDOMIZE))
9337- return base;
9338- ret = PAGE_ALIGN(base + brk_rnd());
9339- return (ret > base) ? ret : base;
9340-}
9341diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9342index 9b436c2..54fbf0a 100644
9343--- a/arch/s390/mm/mmap.c
9344+++ b/arch/s390/mm/mmap.c
9345@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = mmap_base_legacy();
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357 mm->mmap_base = mmap_base();
9358+
9359+#ifdef CONFIG_PAX_RANDMMAP
9360+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9361+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9362+#endif
9363+
9364 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9365 }
9366 }
9367@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9368 */
9369 if (mmap_is_legacy()) {
9370 mm->mmap_base = mmap_base_legacy();
9371+
9372+#ifdef CONFIG_PAX_RANDMMAP
9373+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9374+ mm->mmap_base += mm->delta_mmap;
9375+#endif
9376+
9377 mm->get_unmapped_area = s390_get_unmapped_area;
9378 } else {
9379 mm->mmap_base = mmap_base();
9380+
9381+#ifdef CONFIG_PAX_RANDMMAP
9382+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9383+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9384+#endif
9385+
9386 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9387 }
9388 }
9389diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9390index ae3d59f..f65f075 100644
9391--- a/arch/score/include/asm/cache.h
9392+++ b/arch/score/include/asm/cache.h
9393@@ -1,7 +1,9 @@
9394 #ifndef _ASM_SCORE_CACHE_H
9395 #define _ASM_SCORE_CACHE_H
9396
9397+#include <linux/const.h>
9398+
9399 #define L1_CACHE_SHIFT 4
9400-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9401+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9402
9403 #endif /* _ASM_SCORE_CACHE_H */
9404diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9405index f9f3cd5..58ff438 100644
9406--- a/arch/score/include/asm/exec.h
9407+++ b/arch/score/include/asm/exec.h
9408@@ -1,6 +1,6 @@
9409 #ifndef _ASM_SCORE_EXEC_H
9410 #define _ASM_SCORE_EXEC_H
9411
9412-extern unsigned long arch_align_stack(unsigned long sp);
9413+#define arch_align_stack(x) (x)
9414
9415 #endif /* _ASM_SCORE_EXEC_H */
9416diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9417index a1519ad3..e8ac1ff 100644
9418--- a/arch/score/kernel/process.c
9419+++ b/arch/score/kernel/process.c
9420@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9421
9422 return task_pt_regs(task)->cp0_epc;
9423 }
9424-
9425-unsigned long arch_align_stack(unsigned long sp)
9426-{
9427- return sp;
9428-}
9429diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9430index ef9e555..331bd29 100644
9431--- a/arch/sh/include/asm/cache.h
9432+++ b/arch/sh/include/asm/cache.h
9433@@ -9,10 +9,11 @@
9434 #define __ASM_SH_CACHE_H
9435 #ifdef __KERNEL__
9436
9437+#include <linux/const.h>
9438 #include <linux/init.h>
9439 #include <cpu/cache.h>
9440
9441-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9442+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9443
9444 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9445
9446diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9447index 6777177..cb5e44f 100644
9448--- a/arch/sh/mm/mmap.c
9449+++ b/arch/sh/mm/mmap.c
9450@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9451 struct mm_struct *mm = current->mm;
9452 struct vm_area_struct *vma;
9453 int do_colour_align;
9454+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9455 struct vm_unmapped_area_info info;
9456
9457 if (flags & MAP_FIXED) {
9458@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9459 if (filp || (flags & MAP_SHARED))
9460 do_colour_align = 1;
9461
9462+#ifdef CONFIG_PAX_RANDMMAP
9463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9464+#endif
9465+
9466 if (addr) {
9467 if (do_colour_align)
9468 addr = COLOUR_ALIGN(addr, pgoff);
9469@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9470 addr = PAGE_ALIGN(addr);
9471
9472 vma = find_vma(mm, addr);
9473- if (TASK_SIZE - len >= addr &&
9474- (!vma || addr + len <= vma->vm_start))
9475+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9476 return addr;
9477 }
9478
9479 info.flags = 0;
9480 info.length = len;
9481- info.low_limit = TASK_UNMAPPED_BASE;
9482+ info.low_limit = mm->mmap_base;
9483 info.high_limit = TASK_SIZE;
9484 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9485 info.align_offset = pgoff << PAGE_SHIFT;
9486@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9487 struct mm_struct *mm = current->mm;
9488 unsigned long addr = addr0;
9489 int do_colour_align;
9490+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9491 struct vm_unmapped_area_info info;
9492
9493 if (flags & MAP_FIXED) {
9494@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9495 if (filp || (flags & MAP_SHARED))
9496 do_colour_align = 1;
9497
9498+#ifdef CONFIG_PAX_RANDMMAP
9499+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9500+#endif
9501+
9502 /* requesting a specific address */
9503 if (addr) {
9504 if (do_colour_align)
9505@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9506 addr = PAGE_ALIGN(addr);
9507
9508 vma = find_vma(mm, addr);
9509- if (TASK_SIZE - len >= addr &&
9510- (!vma || addr + len <= vma->vm_start))
9511+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9512 return addr;
9513 }
9514
9515@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9516 VM_BUG_ON(addr != -ENOMEM);
9517 info.flags = 0;
9518 info.low_limit = TASK_UNMAPPED_BASE;
9519+
9520+#ifdef CONFIG_PAX_RANDMMAP
9521+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9522+ info.low_limit += mm->delta_mmap;
9523+#endif
9524+
9525 info.high_limit = TASK_SIZE;
9526 addr = vm_unmapped_area(&info);
9527 }
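Both sh allocators swap the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap(), which additionally enforces a guard gap below stack VMAs (the randomized thread-stack offset passed in is omitted from this sketch). A simplified model of the stronger predicate; field names and the gap size are illustrative:

#include <stdio.h>

struct vma_model {
	unsigned long vm_start;
	int is_stack;
};

#define STACK_GUARD_GAP (256UL << 12)	/* illustrative gap, in bytes */

static int check_heap_stack_gap(const struct vma_model *vma,
				unsigned long addr, unsigned long len)
{
	if (!vma)
		return 1;	/* nothing above the candidate range: ok */
	unsigned long gap = vma->is_stack ? STACK_GUARD_GAP : 0;
	return addr + len + gap <= vma->vm_start;	/* overflow ignored for brevity */
}

int main(void)
{
	struct vma_model stack = { 0x7f0000000000UL, 1 };

	printf("%d\n", check_heap_stack_gap(&stack, 0x7effffe00000UL, 0x1000)); /* ok */
	printf("%d\n", check_heap_stack_gap(&stack, stack.vm_start - 0x1000,
					    0x1000));	/* inside the guard gap */
	return 0;
}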
9528diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9529index be56a24..443328f 100644
9530--- a/arch/sparc/include/asm/atomic_64.h
9531+++ b/arch/sparc/include/asm/atomic_64.h
9532@@ -14,18 +14,40 @@
9533 #define ATOMIC64_INIT(i) { (i) }
9534
9535 #define atomic_read(v) (*(volatile int *)&(v)->counter)
9536+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9537+{
9538+ return v->counter;
9539+}
9540 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
9541+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9542+{
9543+ return v->counter;
9544+}
9545
9546 #define atomic_set(v, i) (((v)->counter) = i)
9547+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9548+{
9549+ v->counter = i;
9550+}
9551 #define atomic64_set(v, i) (((v)->counter) = i)
9552+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9553+{
9554+ v->counter = i;
9555+}
9556
9557 extern void atomic_add(int, atomic_t *);
9558+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
9559 extern void atomic64_add(long, atomic64_t *);
9560+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
9561 extern void atomic_sub(int, atomic_t *);
9562+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
9563 extern void atomic64_sub(long, atomic64_t *);
9564+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
9565
9566 extern int atomic_add_ret(int, atomic_t *);
9567+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
9568 extern long atomic64_add_ret(long, atomic64_t *);
9569+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
9570 extern int atomic_sub_ret(int, atomic_t *);
9571 extern long atomic64_sub_ret(long, atomic64_t *);
9572
9573@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9574 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
9575
9576 #define atomic_inc_return(v) atomic_add_ret(1, v)
9577+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9578+{
9579+ return atomic_add_ret_unchecked(1, v);
9580+}
9581 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
9582+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9583+{
9584+ return atomic64_add_ret_unchecked(1, v);
9585+}
9586
9587 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
9588 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
9589
9590 #define atomic_add_return(i, v) atomic_add_ret(i, v)
9591+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9592+{
9593+ return atomic_add_ret_unchecked(i, v);
9594+}
9595 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
9596+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9597+{
9598+ return atomic64_add_ret_unchecked(i, v);
9599+}
9600
9601 /*
9602 * atomic_inc_and_test - increment and test
9603@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9604 * other cases.
9605 */
9606 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9607+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9608+{
9609+ return atomic_inc_return_unchecked(v) == 0;
9610+}
9611 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9612
9613 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9614@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9615 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9616
9617 #define atomic_inc(v) atomic_add(1, v)
9618+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9619+{
9620+ atomic_add_unchecked(1, v);
9621+}
9622 #define atomic64_inc(v) atomic64_add(1, v)
9623+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9624+{
9625+ atomic64_add_unchecked(1, v);
9626+}
9627
9628 #define atomic_dec(v) atomic_sub(1, v)
9629+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9630+{
9631+ atomic_sub_unchecked(1, v);
9632+}
9633 #define atomic64_dec(v) atomic64_sub(1, v)
9634+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9635+{
9636+ atomic64_sub_unchecked(1, v);
9637+}
9638
9639 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9640 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9641
9642 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9643+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9644+{
9645+ return cmpxchg(&v->counter, old, new);
9646+}
9647 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9648+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9649+{
9650+ return xchg(&v->counter, new);
9651+}
9652
9653 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9654 {
9655- int c, old;
9656+ int c, old, new;
9657 c = atomic_read(v);
9658 for (;;) {
9659- if (unlikely(c == (u)))
9660+ if (unlikely(c == u))
9661 break;
9662- old = atomic_cmpxchg((v), c, c + (a));
9663+
9664+ asm volatile("addcc %2, %0, %0\n"
9665+
9666+#ifdef CONFIG_PAX_REFCOUNT
9667+ "tvs %%icc, 6\n"
9668+#endif
9669+
9670+ : "=r" (new)
9671+ : "0" (c), "ir" (a)
9672+ : "cc");
9673+
9674+ old = atomic_cmpxchg(v, c, new);
9675 if (likely(old == c))
9676 break;
9677 c = old;
9678@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9679 #define atomic64_cmpxchg(v, o, n) \
9680 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9681 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9682+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9683+{
9684+ return xchg(&v->counter, new);
9685+}
9686
9687 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9688 {
9689- long c, old;
9690+ long c, old, new;
9691 c = atomic64_read(v);
9692 for (;;) {
9693- if (unlikely(c == (u)))
9694+ if (unlikely(c == u))
9695 break;
9696- old = atomic64_cmpxchg((v), c, c + (a));
9697+
9698+ asm volatile("addcc %2, %0, %0\n"
9699+
9700+#ifdef CONFIG_PAX_REFCOUNT
9701+ "tvs %%xcc, 6\n"
9702+#endif
9703+
9704+ : "=r" (new)
9705+ : "0" (c), "ir" (a)
9706+ : "cc");
9707+
9708+ old = atomic64_cmpxchg(v, c, new);
9709 if (likely(old == c))
9710 break;
9711 c = old;
9712 }
9713- return c != (u);
9714+ return c != u;
9715 }
9716
9717 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
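Under PAX_REFCOUNT the sparc atomics replace plain add/sub with addcc followed by a conditional trap ("tvs %icc, 6" / "tvs %xcc, 6") that fires when the signed result wraps, turning a reference-count overflow into an immediate kill rather than a later use-after-free. A portable C model of the same check, using __builtin_add_overflow() (GCC/Clang) in place of the trap:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int checked_add(int c, int a)
{
	int new;

	if (__builtin_add_overflow(c, a, &new)) {	/* "tvs %icc, 6" equivalent */
		fprintf(stderr, "PAX: refcount overflow\n");
		abort();
	}
	return new;
}

int main(void)
{
	printf("%d\n", checked_add(40, 2));
	printf("%d\n", checked_add(INT_MAX, 1));	/* aborts */
	return 0;
}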
9718diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
9719index b5aad96..99d7465 100644
9720--- a/arch/sparc/include/asm/barrier_64.h
9721+++ b/arch/sparc/include/asm/barrier_64.h
9722@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
9723 do { \
9724 compiletime_assert_atomic_type(*p); \
9725 barrier(); \
9726- ACCESS_ONCE(*p) = (v); \
9727+ ACCESS_ONCE_RW(*p) = (v); \
9728 } while (0)
9729
9730 #define smp_load_acquire(p) \
9731diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9732index 5bb6991..5c2132e 100644
9733--- a/arch/sparc/include/asm/cache.h
9734+++ b/arch/sparc/include/asm/cache.h
9735@@ -7,10 +7,12 @@
9736 #ifndef _SPARC_CACHE_H
9737 #define _SPARC_CACHE_H
9738
9739+#include <linux/const.h>
9740+
9741 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9742
9743 #define L1_CACHE_SHIFT 5
9744-#define L1_CACHE_BYTES 32
9745+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9746
9747 #ifdef CONFIG_SPARC32
9748 #define SMP_CACHE_BYTES_SHIFT 5
9749diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9750index a24e41f..47677ff 100644
9751--- a/arch/sparc/include/asm/elf_32.h
9752+++ b/arch/sparc/include/asm/elf_32.h
9753@@ -114,6 +114,13 @@ typedef struct {
9754
9755 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9756
9757+#ifdef CONFIG_PAX_ASLR
9758+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9759+
9760+#define PAX_DELTA_MMAP_LEN 16
9761+#define PAX_DELTA_STACK_LEN 16
9762+#endif
9763+
9764 /* This yields a mask that user programs can use to figure out what
9765 instruction set this cpu supports. This can NOT be done in userspace
9766 on Sparc. */
9767diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9768index 370ca1e..d4f4a98 100644
9769--- a/arch/sparc/include/asm/elf_64.h
9770+++ b/arch/sparc/include/asm/elf_64.h
9771@@ -189,6 +189,13 @@ typedef struct {
9772 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9773 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9774
9775+#ifdef CONFIG_PAX_ASLR
9776+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9777+
9778+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9779+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9780+#endif
9781+
9782 extern unsigned long sparc64_elf_hwcap;
9783 #define ELF_HWCAP sparc64_elf_hwcap
9784
9785diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9786index 9b1c36d..209298b 100644
9787--- a/arch/sparc/include/asm/pgalloc_32.h
9788+++ b/arch/sparc/include/asm/pgalloc_32.h
9789@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9790 }
9791
9792 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9793+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9794
9795 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9796 unsigned long address)
9797diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9798index bcfe063..b333142 100644
9799--- a/arch/sparc/include/asm/pgalloc_64.h
9800+++ b/arch/sparc/include/asm/pgalloc_64.h
9801@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9802 }
9803
9804 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9805+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9806
9807 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9808 {
9809diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
9810index 59ba6f6..4518128 100644
9811--- a/arch/sparc/include/asm/pgtable.h
9812+++ b/arch/sparc/include/asm/pgtable.h
9813@@ -5,4 +5,8 @@
9814 #else
9815 #include <asm/pgtable_32.h>
9816 #endif
9817+
9818+#define ktla_ktva(addr) (addr)
9819+#define ktva_ktla(addr) (addr)
9820+
9821 #endif
9822diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9823index 502f632..da1917f 100644
9824--- a/arch/sparc/include/asm/pgtable_32.h
9825+++ b/arch/sparc/include/asm/pgtable_32.h
9826@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9827 #define PAGE_SHARED SRMMU_PAGE_SHARED
9828 #define PAGE_COPY SRMMU_PAGE_COPY
9829 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9830+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9831+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9832+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9833 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9834
9835 /* Top-level page directory - dummy used by init-mm.
9836@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9837
9838 /* xwr */
9839 #define __P000 PAGE_NONE
9840-#define __P001 PAGE_READONLY
9841-#define __P010 PAGE_COPY
9842-#define __P011 PAGE_COPY
9843+#define __P001 PAGE_READONLY_NOEXEC
9844+#define __P010 PAGE_COPY_NOEXEC
9845+#define __P011 PAGE_COPY_NOEXEC
9846 #define __P100 PAGE_READONLY
9847 #define __P101 PAGE_READONLY
9848 #define __P110 PAGE_COPY
9849 #define __P111 PAGE_COPY
9850
9851 #define __S000 PAGE_NONE
9852-#define __S001 PAGE_READONLY
9853-#define __S010 PAGE_SHARED
9854-#define __S011 PAGE_SHARED
9855+#define __S001 PAGE_READONLY_NOEXEC
9856+#define __S010 PAGE_SHARED_NOEXEC
9857+#define __S011 PAGE_SHARED_NOEXEC
9858 #define __S100 PAGE_READONLY
9859 #define __S101 PAGE_READONLY
9860 #define __S110 PAGE_SHARED
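The sparc32 protection tables now route every mapping whose x bit is clear to a *_NOEXEC pgprot, so only mappings requested with PROT_EXEC keep execute permission and PAGEEXEC can deny instruction fetches elsewhere. A transcription of the resulting table (digit order x, w, r, matching the kernel macros; private writable entries become COW):

#include <stdio.h>

int main(void)
{
	const char *prot[8];

	prot[0] = "NONE";
	prot[1] = "READONLY_NOEXEC";	/* __P001: r-- */
	prot[2] = "COPY_NOEXEC";	/* __P010: -w- (private: COW) */
	prot[3] = "COPY_NOEXEC";	/* __P011: rw- */
	prot[4] = "READONLY";		/* __P100: --x keeps exec */
	prot[5] = "READONLY";		/* __P101: r-x */
	prot[6] = "COPY";		/* __P110 */
	prot[7] = "COPY";		/* __P111 */
	for (unsigned xwr = 0; xwr < 8; xwr++)
		printf("__P%u%u%u -> PAGE_%s\n",
		       (xwr >> 2) & 1, (xwr >> 1) & 1, xwr & 1, prot[xwr]);
	return 0;
}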
9861diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
9862index 0f9e945..a949e55 100644
9863--- a/arch/sparc/include/asm/pgtable_64.h
9864+++ b/arch/sparc/include/asm/pgtable_64.h
9865@@ -71,6 +71,23 @@
9866
9867 #include <linux/sched.h>
9868
9869+extern unsigned long sparc64_valid_addr_bitmap[];
9870+
9871+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
9872+static inline bool __kern_addr_valid(unsigned long paddr)
9873+{
9874+ if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
9875+ return false;
9876+ return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
9877+}
9878+
9879+static inline bool kern_addr_valid(unsigned long addr)
9880+{
9881+ unsigned long paddr = __pa(addr);
9882+
9883+ return __kern_addr_valid(paddr);
9884+}
9885+
9886 /* Entries per page directory level. */
9887 #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
9888 #define PTRS_PER_PMD (1UL << PMD_BITS)
9889@@ -79,9 +96,12 @@
9890 /* Kernel has a separate 44bit address space. */
9891 #define FIRST_USER_ADDRESS 0
9892
9893-#define pte_ERROR(e) __builtin_trap()
9894-#define pmd_ERROR(e) __builtin_trap()
9895-#define pgd_ERROR(e) __builtin_trap()
9896+#define pmd_ERROR(e) \
9897+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
9898+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
9899+#define pgd_ERROR(e) \
9900+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
9901+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
9902
9903 #endif /* !(__ASSEMBLY__) */
9904
9905@@ -633,7 +653,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
9906 {
9907 pte_t pte = __pte(pmd_val(pmd));
9908
9909- return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
9910+ return pte_val(pte) & _PAGE_PMD_HUGE;
9911 }
9912
9913 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9914@@ -719,20 +739,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
9915 return __pmd(pte_val(pte));
9916 }
9917
9918-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
9919-{
9920- unsigned long mask;
9921-
9922- if (tlb_type == hypervisor)
9923- mask = _PAGE_PRESENT_4V;
9924- else
9925- mask = _PAGE_PRESENT_4U;
9926-
9927- pmd_val(pmd) &= ~mask;
9928-
9929- return pmd;
9930-}
9931-
9932 static inline pmd_t pmd_mksplitting(pmd_t pmd)
9933 {
9934 pte_t pte = __pte(pmd_val(pmd));
9935@@ -757,6 +763,20 @@ static inline int pmd_present(pmd_t pmd)
9936
9937 #define pmd_none(pmd) (!pmd_val(pmd))
9938
9939+/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
9940+ * very simple, it's just the physical address. PTE tables are of
9941+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
9942+ * the top bits outside of the range of any physical address size we
9943+ * support are clear as well. We also validate the physical address itself.
9944+ */
9945+#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
9946+ !__kern_addr_valid(pmd_val(pmd)))
9947+
9948+#define pud_none(pud) (!pud_val(pud))
9949+
9950+#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
9951+ !__kern_addr_valid(pud_val(pud)))
9952+
9953 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
9954 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
9955 pmd_t *pmdp, pmd_t pmd);
9956@@ -790,10 +810,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
9957 #define pud_page_vaddr(pud) \
9958 ((unsigned long) __va(pud_val(pud)))
9959 #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
9960-#define pmd_bad(pmd) (0)
9961 #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
9962-#define pud_none(pud) (!pud_val(pud))
9963-#define pud_bad(pud) (0)
9964 #define pud_present(pud) (pud_val(pud) != 0U)
9965 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
9966
9967@@ -893,6 +910,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
9968 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
9969 pmd_t *pmd);
9970
9971+#define __HAVE_ARCH_PMDP_INVALIDATE
9972+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
9973+ pmd_t *pmdp);
9974+
9975 #define __HAVE_ARCH_PGTABLE_DEPOSIT
9976 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
9977 pgtable_t pgtable);
9978@@ -919,18 +940,6 @@ extern unsigned long pte_file(pte_t);
9979 extern pte_t pgoff_to_pte(unsigned long);
9980 #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
9981
9982-extern unsigned long sparc64_valid_addr_bitmap[];
9983-
9984-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
9985-static inline bool kern_addr_valid(unsigned long addr)
9986-{
9987- unsigned long paddr = __pa(addr);
9988-
9989- if ((paddr >> 41UL) != 0UL)
9990- return false;
9991- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
9992-}
9993-
9994 extern int page_in_phys_avail(unsigned long paddr);
9995
9996 /*
9997diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9998index 79da178..c2eede8 100644
9999--- a/arch/sparc/include/asm/pgtsrmmu.h
10000+++ b/arch/sparc/include/asm/pgtsrmmu.h
10001@@ -115,6 +115,11 @@
10002 SRMMU_EXEC | SRMMU_REF)
10003 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10004 SRMMU_EXEC | SRMMU_REF)
10005+
10006+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10007+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10008+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10009+
10010 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10011 SRMMU_DIRTY | SRMMU_REF)
10012
10013diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10014index 9689176..63c18ea 100644
10015--- a/arch/sparc/include/asm/spinlock_64.h
10016+++ b/arch/sparc/include/asm/spinlock_64.h
10017@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10018
10019 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10020
10021-static void inline arch_read_lock(arch_rwlock_t *lock)
10022+static inline void arch_read_lock(arch_rwlock_t *lock)
10023 {
10024 unsigned long tmp1, tmp2;
10025
10026 __asm__ __volatile__ (
10027 "1: ldsw [%2], %0\n"
10028 " brlz,pn %0, 2f\n"
10029-"4: add %0, 1, %1\n"
10030+"4: addcc %0, 1, %1\n"
10031+
10032+#ifdef CONFIG_PAX_REFCOUNT
10033+" tvs %%icc, 6\n"
10034+#endif
10035+
10036 " cas [%2], %0, %1\n"
10037 " cmp %0, %1\n"
10038 " bne,pn %%icc, 1b\n"
10039@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10040 " .previous"
10041 : "=&r" (tmp1), "=&r" (tmp2)
10042 : "r" (lock)
10043- : "memory");
10044+ : "memory", "cc");
10045 }
10046
10047-static int inline arch_read_trylock(arch_rwlock_t *lock)
10048+static inline int arch_read_trylock(arch_rwlock_t *lock)
10049 {
10050 int tmp1, tmp2;
10051
10052@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10053 "1: ldsw [%2], %0\n"
10054 " brlz,a,pn %0, 2f\n"
10055 " mov 0, %0\n"
10056-" add %0, 1, %1\n"
10057+" addcc %0, 1, %1\n"
10058+
10059+#ifdef CONFIG_PAX_REFCOUNT
10060+" tvs %%icc, 6\n"
10061+#endif
10062+
10063 " cas [%2], %0, %1\n"
10064 " cmp %0, %1\n"
10065 " bne,pn %%icc, 1b\n"
10066@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10067 return tmp1;
10068 }
10069
10070-static void inline arch_read_unlock(arch_rwlock_t *lock)
10071+static inline void arch_read_unlock(arch_rwlock_t *lock)
10072 {
10073 unsigned long tmp1, tmp2;
10074
10075 __asm__ __volatile__(
10076 "1: lduw [%2], %0\n"
10077-" sub %0, 1, %1\n"
10078+" subcc %0, 1, %1\n"
10079+
10080+#ifdef CONFIG_PAX_REFCOUNT
10081+" tvs %%icc, 6\n"
10082+#endif
10083+
10084 " cas [%2], %0, %1\n"
10085 " cmp %0, %1\n"
10086 " bne,pn %%xcc, 1b\n"
10087@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10088 : "memory");
10089 }
10090
10091-static void inline arch_write_lock(arch_rwlock_t *lock)
10092+static inline void arch_write_lock(arch_rwlock_t *lock)
10093 {
10094 unsigned long mask, tmp1, tmp2;
10095
10096@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10097 : "memory");
10098 }
10099
10100-static void inline arch_write_unlock(arch_rwlock_t *lock)
10101+static inline void arch_write_unlock(arch_rwlock_t *lock)
10102 {
10103 __asm__ __volatile__(
10104 " stw %%g0, [%0]"
10105@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10106 : "memory");
10107 }
10108
10109-static int inline arch_write_trylock(arch_rwlock_t *lock)
10110+static inline int arch_write_trylock(arch_rwlock_t *lock)
10111 {
10112 unsigned long mask, tmp1, tmp2, result;
10113
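
Note: throughout this file the pattern is the same. Each add/sub on the reader count becomes addcc/subcc so the instruction updates the integer condition codes, and under CONFIG_PAX_REFCOUNT a following "tvs %icc, 6" (trap on overflow) raises software trap 6 when the signed counter wraps; bad_trap() in traps_64.c further down routes that trap to pax_report_refcount_overflow(). The "cc" clobber added to the asm constraints tells GCC the condition codes are now written. In C terms the checked operation behaves roughly like this sketch, where report_refcount_overflow() is a hypothetical stand-in for the trap-6 path:

#include <stdlib.h>

static void report_refcount_overflow(void)
{
	/* models the kernel's trap-6 handler: report, then kill */
	abort();
}

static int refcount_add_checked(int *v, int i)
{
	int new;

	/* addcc sets the overflow flag; tvs %icc, 6 traps on it --
	 * __builtin_add_overflow expresses the same signed test */
	if (__builtin_add_overflow(*v, i, &new))
		report_refcount_overflow();
	return *v = new;
}
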
10114diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10115index 96efa7a..16858bf 100644
10116--- a/arch/sparc/include/asm/thread_info_32.h
10117+++ b/arch/sparc/include/asm/thread_info_32.h
10118@@ -49,6 +49,8 @@ struct thread_info {
10119 unsigned long w_saved;
10120
10121 struct restart_block restart_block;
10122+
10123+ unsigned long lowest_stack;
10124 };
10125
10126 /*
10127diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10128index a5f01ac..703b554 100644
10129--- a/arch/sparc/include/asm/thread_info_64.h
10130+++ b/arch/sparc/include/asm/thread_info_64.h
10131@@ -63,6 +63,8 @@ struct thread_info {
10132 struct pt_regs *kern_una_regs;
10133 unsigned int kern_una_insn;
10134
10135+ unsigned long lowest_stack;
10136+
10137 unsigned long fpregs[0] __attribute__ ((aligned(64)));
10138 };
10139
10140@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10141 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10142 /* flag bit 4 is available */
10143 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10144-/* flag bit 6 is available */
10145+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10146 #define TIF_32BIT 7 /* 32-bit binary */
10147 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10148 #define TIF_SECCOMP 9 /* secure computing */
10149 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10150 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10151+
10152 /* NOTE: Thread flags >= 12 should be ones we have no interest
10153 * in using in assembly, else we can't use the mask as
10154 * an immediate value in instructions such as andcc.
10155@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10156 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10157 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10158 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10159+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10160
10161 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10162 _TIF_DO_NOTIFY_RESUME_MASK | \
10163 _TIF_NEED_RESCHED)
10164 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10165
10166+#define _TIF_WORK_SYSCALL \
10167+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10168+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10169+
10170+
10171 /*
10172 * Thread-synchronous status.
10173 *
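
Note: two separate additions meet in these thread_info changes. lowest_stack is the watermark PaX's stack-erasure feature (STACKLEAK, elsewhere in the patch) maintains to know how deep the kernel stack has been used; TIF_GRSEC_SETXID occupies the previously free flag bit 6 so a setuid()-style call can mark sibling threads for a deferred credential update. The new _TIF_WORK_SYSCALL mask bundles that bit with the existing trace/audit/seccomp bits, which is what lets syscalls.S below test a single mask with one andcc instead of an ever-growing immediate.
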
10174diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
10175index 2230f80..90916f9 100644
10176--- a/arch/sparc/include/asm/tsb.h
10177+++ b/arch/sparc/include/asm/tsb.h
10178@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
10179 andcc REG1, REG2, %g0; \
10180 be,pt %xcc, 700f; \
10181 sethi %hi(4 * 1024 * 1024), REG2; \
10182- andn REG1, REG2, REG1; \
10183+ brgez,pn REG1, FAIL_LABEL; \
10184+ andn REG1, REG2, REG1; \
10185 and VADDR, REG2, REG2; \
10186 brlz,pt REG1, PTE_LABEL; \
10187 or REG1, REG2, REG1; \
10188diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10189index 0167d26..767bb0c 100644
10190--- a/arch/sparc/include/asm/uaccess.h
10191+++ b/arch/sparc/include/asm/uaccess.h
10192@@ -1,5 +1,6 @@
10193 #ifndef ___ASM_SPARC_UACCESS_H
10194 #define ___ASM_SPARC_UACCESS_H
10195+
10196 #if defined(__sparc__) && defined(__arch64__)
10197 #include <asm/uaccess_64.h>
10198 #else
10199diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10200index 53a28dd..50c38c3 100644
10201--- a/arch/sparc/include/asm/uaccess_32.h
10202+++ b/arch/sparc/include/asm/uaccess_32.h
10203@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
10204
10205 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10206 {
10207- if (n && __access_ok((unsigned long) to, n))
10208+ if ((long)n < 0)
10209+ return n;
10210+
10211+ if (n && __access_ok((unsigned long) to, n)) {
10212+ if (!__builtin_constant_p(n))
10213+ check_object_size(from, n, true);
10214 return __copy_user(to, (__force void __user *) from, n);
10215- else
10216+ } else
10217 return n;
10218 }
10219
10220 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10221 {
10222+ if ((long)n < 0)
10223+ return n;
10224+
10225+ if (!__builtin_constant_p(n))
10226+ check_object_size(from, n, true);
10227+
10228 return __copy_user(to, (__force void __user *) from, n);
10229 }
10230
10231 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10232 {
10233- if (n && __access_ok((unsigned long) from, n))
10234+ if ((long)n < 0)
10235+ return n;
10236+
10237+ if (n && __access_ok((unsigned long) from, n)) {
10238+ if (!__builtin_constant_p(n))
10239+ check_object_size(to, n, false);
10240 return __copy_user((__force void __user *) to, from, n);
10241- else
10242+ } else
10243 return n;
10244 }
10245
10246 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10247 {
10248+ if ((long)n < 0)
10249+ return n;
10250+
10251 return __copy_user((__force void __user *) to, from, n);
10252 }
10253
10254diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10255index ad7e178..c9e7423 100644
10256--- a/arch/sparc/include/asm/uaccess_64.h
10257+++ b/arch/sparc/include/asm/uaccess_64.h
10258@@ -10,6 +10,7 @@
10259 #include <linux/compiler.h>
10260 #include <linux/string.h>
10261 #include <linux/thread_info.h>
10262+#include <linux/kernel.h>
10263 #include <asm/asi.h>
10264 #include <asm/spitfire.h>
10265 #include <asm-generic/uaccess-unaligned.h>
10266@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
10267 static inline unsigned long __must_check
10268 copy_from_user(void *to, const void __user *from, unsigned long size)
10269 {
10270- unsigned long ret = ___copy_from_user(to, from, size);
10271+ unsigned long ret;
10272
10273+ if ((long)size < 0 || size > INT_MAX)
10274+ return size;
10275+
10276+ if (!__builtin_constant_p(size))
10277+ check_object_size(to, size, false);
10278+
10279+ ret = ___copy_from_user(to, from, size);
10280 if (unlikely(ret))
10281 ret = copy_from_user_fixup(to, from, size);
10282
10283@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
10284 static inline unsigned long __must_check
10285 copy_to_user(void __user *to, const void *from, unsigned long size)
10286 {
10287- unsigned long ret = ___copy_to_user(to, from, size);
10288+ unsigned long ret;
10289
10290+ if ((long)size < 0 || size > INT_MAX)
10291+ return size;
10292+
10293+ if (!__builtin_constant_p(size))
10294+ check_object_size(from, size, true);
10295+
10296+ ret = ___copy_to_user(to, from, size);
10297 if (unlikely(ret))
10298 ret = copy_to_user_fixup(to, from, size);
10299 return ret;
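
Note: both uaccess headers gain the same two PAX_USERCOPY-style guards. A sign check treats a "negative" length as a bug, since (long)n < 0 means a huge unsigned value, almost always an unchecked subtraction gone negative upstream; and check_object_size() asks the slab/stack layer, for non-constant sizes, whether the kernel buffer really spans the requested range. The 64-bit variant additionally caps size at INT_MAX. The shape of the check, as a standalone sketch (check_object_size() is the kernel's own hook, stubbed here for illustration):

#include <limits.h>
#include <stddef.h>

/* stub standing in for the kernel's slab/stack bounds hook */
static void check_object_size(const void *ptr, size_t n, int to_user) { }

static size_t copy_to_user_sketch(void *to, const void *from, size_t n)
{
	/* refuse sign-extension bugs: report everything as uncopied */
	if ((long)n < 0 || n > INT_MAX)
		return n;

	if (!__builtin_constant_p(n))
		check_object_size(from, n, 1);

	/* ... the real ___copy_to_user() would run here ... */
	return 0;
}
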
10300diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10301index d15cc17..d0ae796 100644
10302--- a/arch/sparc/kernel/Makefile
10303+++ b/arch/sparc/kernel/Makefile
10304@@ -4,7 +4,7 @@
10305 #
10306
10307 asflags-y := -ansi
10308-ccflags-y := -Werror
10309+#ccflags-y := -Werror
10310
10311 extra-y := head_$(BITS).o
10312
10313diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10314index 510baec..9ff2607 100644
10315--- a/arch/sparc/kernel/process_32.c
10316+++ b/arch/sparc/kernel/process_32.c
10317@@ -115,14 +115,14 @@ void show_regs(struct pt_regs *r)
10318
10319 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10320 r->psr, r->pc, r->npc, r->y, print_tainted());
10321- printk("PC: <%pS>\n", (void *) r->pc);
10322+ printk("PC: <%pA>\n", (void *) r->pc);
10323 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10324 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10325 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10326 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10327 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10328 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10329- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10330+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10331
10332 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10333 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10334@@ -159,7 +159,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10335 rw = (struct reg_window32 *) fp;
10336 pc = rw->ins[7];
10337 printk("[%08lx : ", pc);
10338- printk("%pS ] ", (void *) pc);
10339+ printk("%pA ] ", (void *) pc);
10340 fp = rw->ins[6];
10341 } while (++count < 16);
10342 printk("\n");
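
Note: the %pS -> %pA substitutions here (and in every backtrace printer below) switch to a vsprintf format extension introduced elsewhere in this patch. Like %pS it resolves a kernel address to a symbol; the dedicated specifier keeps the symbolization of oops and backtrace output under PaX's control, and its exact behaviour is defined by the lib/vsprintf.c hunk, which falls outside this section.
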
10343diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10344index d7b4967..2edf827 100644
10345--- a/arch/sparc/kernel/process_64.c
10346+++ b/arch/sparc/kernel/process_64.c
10347@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10348 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10349 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10350 if (regs->tstate & TSTATE_PRIV)
10351- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10352+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10353 }
10354
10355 void show_regs(struct pt_regs *regs)
10356@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10357
10358 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10359 regs->tpc, regs->tnpc, regs->y, print_tainted());
10360- printk("TPC: <%pS>\n", (void *) regs->tpc);
10361+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10362 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10363 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10364 regs->u_regs[3]);
10365@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10366 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10367 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10368 regs->u_regs[15]);
10369- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10370+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10371 show_regwindow(regs);
10372 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10373 }
10374@@ -272,7 +272,7 @@ void arch_trigger_all_cpu_backtrace(void)
10375 ((tp && tp->task) ? tp->task->pid : -1));
10376
10377 if (gp->tstate & TSTATE_PRIV) {
10378- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10379+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10380 (void *) gp->tpc,
10381 (void *) gp->o7,
10382 (void *) gp->i7,
10383diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10384index 79cc0d1..ec62734 100644
10385--- a/arch/sparc/kernel/prom_common.c
10386+++ b/arch/sparc/kernel/prom_common.c
10387@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10388
10389 unsigned int prom_early_allocated __initdata;
10390
10391-static struct of_pdt_ops prom_sparc_ops __initdata = {
10392+static struct of_pdt_ops prom_sparc_ops __initconst = {
10393 .nextprop = prom_common_nextprop,
10394 .getproplen = prom_getproplen,
10395 .getproperty = prom_getproperty,
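
Note: __initdata -> __initconst is part of the patch's wider constification effort: of_pdt_ops is only ever read, so the instance moves into a read-only init section, removing one writable function-pointer table. The patch's constify gcc plugin (added elsewhere) is what treats ops structures like this as const, which is why no explicit qualifier needs to be spelled out here.
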
10396diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10397index c13c9f2..d572c34 100644
10398--- a/arch/sparc/kernel/ptrace_64.c
10399+++ b/arch/sparc/kernel/ptrace_64.c
10400@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10401 return ret;
10402 }
10403
10404+#ifdef CONFIG_GRKERNSEC_SETXID
10405+extern void gr_delayed_cred_worker(void);
10406+#endif
10407+
10408 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10409 {
10410 int ret = 0;
10411@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10412 if (test_thread_flag(TIF_NOHZ))
10413 user_exit();
10414
10415+#ifdef CONFIG_GRKERNSEC_SETXID
10416+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10417+ gr_delayed_cred_worker();
10418+#endif
10419+
10420 if (test_thread_flag(TIF_SYSCALL_TRACE))
10421 ret = tracehook_report_syscall_entry(regs);
10422
10423@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10424 if (test_thread_flag(TIF_NOHZ))
10425 user_exit();
10426
10427+#ifdef CONFIG_GRKERNSEC_SETXID
10428+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10429+ gr_delayed_cred_worker();
10430+#endif
10431+
10432 audit_syscall_exit(regs);
10433
10434 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
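
Note: this is the consumer side of TIF_GRSEC_SETXID. When one thread changes credentials, grsecurity flags its siblings, and each flagged thread applies the update to itself at the next syscall boundary, where it is in a safe context to do so. A user-space model of the defer-and-drain pattern, where stdatomic stands in for the kernel's thread-flag helpers and gr_delayed_cred_worker() is the real worker named above:

#include <stdatomic.h>

#define TIF_GRSEC_SETXID (1u << 6)

static _Atomic unsigned int thread_flags;

/* setuid() path: mark the thread instead of rewriting its creds remotely */
static void mark_for_cred_update(void)
{
	atomic_fetch_or(&thread_flags, TIF_GRSEC_SETXID);
}

/* syscall entry/exit: the flagged thread updates its own credentials */
static void syscall_boundary(void)
{
	unsigned int old = atomic_fetch_and(&thread_flags, ~TIF_GRSEC_SETXID);

	if (old & TIF_GRSEC_SETXID) {
		/* gr_delayed_cred_worker() would run here */
	}
}
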
10435diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10436index b085311..6f885f7 100644
10437--- a/arch/sparc/kernel/smp_64.c
10438+++ b/arch/sparc/kernel/smp_64.c
10439@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
10440 extern unsigned long xcall_flush_dcache_page_spitfire;
10441
10442 #ifdef CONFIG_DEBUG_DCFLUSH
10443-extern atomic_t dcpage_flushes;
10444-extern atomic_t dcpage_flushes_xcall;
10445+extern atomic_unchecked_t dcpage_flushes;
10446+extern atomic_unchecked_t dcpage_flushes_xcall;
10447 #endif
10448
10449 static inline void __local_flush_dcache_page(struct page *page)
10450@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10451 return;
10452
10453 #ifdef CONFIG_DEBUG_DCFLUSH
10454- atomic_inc(&dcpage_flushes);
10455+ atomic_inc_unchecked(&dcpage_flushes);
10456 #endif
10457
10458 this_cpu = get_cpu();
10459@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10460 xcall_deliver(data0, __pa(pg_addr),
10461 (u64) pg_addr, cpumask_of(cpu));
10462 #ifdef CONFIG_DEBUG_DCFLUSH
10463- atomic_inc(&dcpage_flushes_xcall);
10464+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10465 #endif
10466 }
10467 }
10468@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10469 preempt_disable();
10470
10471 #ifdef CONFIG_DEBUG_DCFLUSH
10472- atomic_inc(&dcpage_flushes);
10473+ atomic_inc_unchecked(&dcpage_flushes);
10474 #endif
10475 data0 = 0;
10476 pg_addr = page_address(page);
10477@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10478 xcall_deliver(data0, __pa(pg_addr),
10479 (u64) pg_addr, cpu_online_mask);
10480 #ifdef CONFIG_DEBUG_DCFLUSH
10481- atomic_inc(&dcpage_flushes_xcall);
10482+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10483 #endif
10484 }
10485 __local_flush_dcache_page(page);
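
Note: dcpage_flushes and dcpage_flushes_xcall are pure statistics, so wraparound is harmless here; atomic_unchecked_t is the patch's opt-out type for exactly such counters, backed by the plain non-trapping add/sub (see the *_unchecked entry points added to arch/sparc/lib/atomic_64.S below), so PAX_REFCOUNT's overflow trap only guards real reference counts.
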
10486diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10487index 3a8d184..49498a8 100644
10488--- a/arch/sparc/kernel/sys_sparc_32.c
10489+++ b/arch/sparc/kernel/sys_sparc_32.c
10490@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10491 if (len > TASK_SIZE - PAGE_SIZE)
10492 return -ENOMEM;
10493 if (!addr)
10494- addr = TASK_UNMAPPED_BASE;
10495+ addr = current->mm->mmap_base;
10496
10497 info.flags = 0;
10498 info.length = len;
10499diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10500index beb0b5a..5a153f7 100644
10501--- a/arch/sparc/kernel/sys_sparc_64.c
10502+++ b/arch/sparc/kernel/sys_sparc_64.c
10503@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10504 struct vm_area_struct * vma;
10505 unsigned long task_size = TASK_SIZE;
10506 int do_color_align;
10507+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10508 struct vm_unmapped_area_info info;
10509
10510 if (flags & MAP_FIXED) {
10511 /* We do not accept a shared mapping if it would violate
10512 * cache aliasing constraints.
10513 */
10514- if ((flags & MAP_SHARED) &&
10515+ if ((filp || (flags & MAP_SHARED)) &&
10516 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10517 return -EINVAL;
10518 return addr;
10519@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10520 if (filp || (flags & MAP_SHARED))
10521 do_color_align = 1;
10522
10523+#ifdef CONFIG_PAX_RANDMMAP
10524+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10525+#endif
10526+
10527 if (addr) {
10528 if (do_color_align)
10529 addr = COLOR_ALIGN(addr, pgoff);
10530@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10531 addr = PAGE_ALIGN(addr);
10532
10533 vma = find_vma(mm, addr);
10534- if (task_size - len >= addr &&
10535- (!vma || addr + len <= vma->vm_start))
10536+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10537 return addr;
10538 }
10539
10540 info.flags = 0;
10541 info.length = len;
10542- info.low_limit = TASK_UNMAPPED_BASE;
10543+ info.low_limit = mm->mmap_base;
10544 info.high_limit = min(task_size, VA_EXCLUDE_START);
10545 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10546 info.align_offset = pgoff << PAGE_SHIFT;
10547+ info.threadstack_offset = offset;
10548 addr = vm_unmapped_area(&info);
10549
10550 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10551 VM_BUG_ON(addr != -ENOMEM);
10552 info.low_limit = VA_EXCLUDE_END;
10553+
10554+#ifdef CONFIG_PAX_RANDMMAP
10555+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10556+ info.low_limit += mm->delta_mmap;
10557+#endif
10558+
10559 info.high_limit = task_size;
10560 addr = vm_unmapped_area(&info);
10561 }
10562@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10563 unsigned long task_size = STACK_TOP32;
10564 unsigned long addr = addr0;
10565 int do_color_align;
10566+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10567 struct vm_unmapped_area_info info;
10568
10569 /* This should only ever run for 32-bit processes. */
10570@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10571 /* We do not accept a shared mapping if it would violate
10572 * cache aliasing constraints.
10573 */
10574- if ((flags & MAP_SHARED) &&
10575+ if ((filp || (flags & MAP_SHARED)) &&
10576 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10577 return -EINVAL;
10578 return addr;
10579@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10580 if (filp || (flags & MAP_SHARED))
10581 do_color_align = 1;
10582
10583+#ifdef CONFIG_PAX_RANDMMAP
10584+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10585+#endif
10586+
10587 /* requesting a specific address */
10588 if (addr) {
10589 if (do_color_align)
10590@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10591 addr = PAGE_ALIGN(addr);
10592
10593 vma = find_vma(mm, addr);
10594- if (task_size - len >= addr &&
10595- (!vma || addr + len <= vma->vm_start))
10596+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10597 return addr;
10598 }
10599
10600@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10601 info.high_limit = mm->mmap_base;
10602 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10603 info.align_offset = pgoff << PAGE_SHIFT;
10604+ info.threadstack_offset = offset;
10605 addr = vm_unmapped_area(&info);
10606
10607 /*
10608@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10609 VM_BUG_ON(addr != -ENOMEM);
10610 info.flags = 0;
10611 info.low_limit = TASK_UNMAPPED_BASE;
10612+
10613+#ifdef CONFIG_PAX_RANDMMAP
10614+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10615+ info.low_limit += mm->delta_mmap;
10616+#endif
10617+
10618 info.high_limit = STACK_TOP32;
10619 addr = vm_unmapped_area(&info);
10620 }
10621@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10622 EXPORT_SYMBOL(get_fb_unmapped_area);
10623
10624 /* Essentially the same as PowerPC. */
10625-static unsigned long mmap_rnd(void)
10626+static unsigned long mmap_rnd(struct mm_struct *mm)
10627 {
10628 unsigned long rnd = 0UL;
10629
10630+#ifdef CONFIG_PAX_RANDMMAP
10631+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10632+#endif
10633+
10634 if (current->flags & PF_RANDOMIZE) {
10635 unsigned long val = get_random_int();
10636 if (test_thread_flag(TIF_32BIT))
10637@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
10638
10639 void arch_pick_mmap_layout(struct mm_struct *mm)
10640 {
10641- unsigned long random_factor = mmap_rnd();
10642+ unsigned long random_factor = mmap_rnd(mm);
10643 unsigned long gap;
10644
10645 /*
10646@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10647 gap == RLIM_INFINITY ||
10648 sysctl_legacy_va_layout) {
10649 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10650+
10651+#ifdef CONFIG_PAX_RANDMMAP
10652+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10653+ mm->mmap_base += mm->delta_mmap;
10654+#endif
10655+
10656 mm->get_unmapped_area = arch_get_unmapped_area;
10657 } else {
10658 /* We know it's 32-bit */
10659@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10660 gap = (task_size / 6 * 5);
10661
10662 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10663+
10664+#ifdef CONFIG_PAX_RANDMMAP
10665+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10666+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10667+#endif
10668+
10669 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10670 }
10671 }
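
Note: the sys_sparc_64.c changes follow the common PAX_RANDMMAP shape: a randomized mm->mmap_base replaces the fixed TASK_UNMAPPED_BASE as the search floor, userland hint addresses are ignored while randomization is active, delta_mmap/delta_stack shift the layout per-exec, and check_heap_stack_gap() plus the new threadstack_offset field enforce a randomized guard gap against neighbouring VMAs. Roughly, the base computation looks like this sketch (field names as used above; randmmap_active is a hypothetical shorthand for mm->pax_flags & MF_PAX_RANDMMAP):

unsigned long pick_mmap_base_sketch(unsigned long task_unmapped_base,
				    unsigned long random_factor,
				    int randmmap_active,
				    unsigned long delta_mmap)
{
	unsigned long base = task_unmapped_base + random_factor;

	/* RANDMMAP supplies its own per-exec delta; the stock
	 * PF_RANDOMIZE factor is suppressed while it is active,
	 * per mmap_rnd() above */
	if (randmmap_active)
		base += delta_mmap;
	return base;
}
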
10672diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10673index 33a17e7..d87fb1f 100644
10674--- a/arch/sparc/kernel/syscalls.S
10675+++ b/arch/sparc/kernel/syscalls.S
10676@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
10677 #endif
10678 .align 32
10679 1: ldx [%g6 + TI_FLAGS], %l5
10680- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10681+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10682 be,pt %icc, rtrap
10683 nop
10684 call syscall_trace_leave
10685@@ -184,7 +184,7 @@ linux_sparc_syscall32:
10686
10687 srl %i3, 0, %o3 ! IEU0
10688 srl %i2, 0, %o2 ! IEU0 Group
10689- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10690+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10691 bne,pn %icc, linux_syscall_trace32 ! CTI
10692 mov %i0, %l5 ! IEU1
10693 5: call %l7 ! CTI Group brk forced
10694@@ -208,7 +208,7 @@ linux_sparc_syscall:
10695
10696 mov %i3, %o3 ! IEU1
10697 mov %i4, %o4 ! IEU0 Group
10698- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10699+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10700 bne,pn %icc, linux_syscall_trace ! CTI Group
10701 mov %i0, %l5 ! IEU0
10702 2: call %l7 ! CTI Group brk forced
10703@@ -223,7 +223,7 @@ ret_sys_call:
10704
10705 cmp %o0, -ERESTART_RESTARTBLOCK
10706 bgeu,pn %xcc, 1f
10707- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10708+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10709 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10710
10711 2:
10712diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10713index 6629829..036032d 100644
10714--- a/arch/sparc/kernel/traps_32.c
10715+++ b/arch/sparc/kernel/traps_32.c
10716@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10717 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10718 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10719
10720+extern void gr_handle_kernel_exploit(void);
10721+
10722 void die_if_kernel(char *str, struct pt_regs *regs)
10723 {
10724 static int die_counter;
10725@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10726 count++ < 30 &&
10727 (((unsigned long) rw) >= PAGE_OFFSET) &&
10728 !(((unsigned long) rw) & 0x7)) {
10729- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10730+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10731 (void *) rw->ins[7]);
10732 rw = (struct reg_window32 *)rw->ins[6];
10733 }
10734 }
10735 printk("Instruction DUMP:");
10736 instruction_dump ((unsigned long *) regs->pc);
10737- if(regs->psr & PSR_PS)
10738+ if(regs->psr & PSR_PS) {
10739+ gr_handle_kernel_exploit();
10740 do_exit(SIGKILL);
10741+ }
10742 do_exit(SIGSEGV);
10743 }
10744
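
Note: gr_handle_kernel_exploit() is grsecurity's active-response hook: dying in kernel mode (PSR_PS set) is treated as a likely exploit attempt, so the hook runs before do_exit() tears the task down. Its policy, banning and killing the offending user's processes, lives in the grsecurity/ code outside this section. The 64-bit die_if_kernel() below gains the same call.
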
10745diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10746index 4ced92f..965eeed 100644
10747--- a/arch/sparc/kernel/traps_64.c
10748+++ b/arch/sparc/kernel/traps_64.c
10749@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10750 i + 1,
10751 p->trapstack[i].tstate, p->trapstack[i].tpc,
10752 p->trapstack[i].tnpc, p->trapstack[i].tt);
10753- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10754+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10755 }
10756 }
10757
10758@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10759
10760 lvl -= 0x100;
10761 if (regs->tstate & TSTATE_PRIV) {
10762+
10763+#ifdef CONFIG_PAX_REFCOUNT
10764+ if (lvl == 6)
10765+ pax_report_refcount_overflow(regs);
10766+#endif
10767+
10768 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10769 die_if_kernel(buffer, regs);
10770 }
10771@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10772 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10773 {
10774 char buffer[32];
10775-
10776+
10777 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10778 0, lvl, SIGTRAP) == NOTIFY_STOP)
10779 return;
10780
10781+#ifdef CONFIG_PAX_REFCOUNT
10782+ if (lvl == 6)
10783+ pax_report_refcount_overflow(regs);
10784+#endif
10785+
10786 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10787
10788 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10789@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10790 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10791 printk("%s" "ERROR(%d): ",
10792 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10793- printk("TPC<%pS>\n", (void *) regs->tpc);
10794+ printk("TPC<%pA>\n", (void *) regs->tpc);
10795 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10796 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10797 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10798@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10799 smp_processor_id(),
10800 (type & 0x1) ? 'I' : 'D',
10801 regs->tpc);
10802- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10803+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10804 panic("Irrecoverable Cheetah+ parity error.");
10805 }
10806
10807@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10808 smp_processor_id(),
10809 (type & 0x1) ? 'I' : 'D',
10810 regs->tpc);
10811- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10812+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10813 }
10814
10815 struct sun4v_error_entry {
10816@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
10817 /*0x38*/u64 reserved_5;
10818 };
10819
10820-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10821-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10822+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10823+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10824
10825 static const char *sun4v_err_type_to_str(u8 type)
10826 {
10827@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10828 }
10829
10830 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10831- int cpu, const char *pfx, atomic_t *ocnt)
10832+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10833 {
10834 u64 *raw_ptr = (u64 *) ent;
10835 u32 attrs;
10836@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10837
10838 show_regs(regs);
10839
10840- if ((cnt = atomic_read(ocnt)) != 0) {
10841- atomic_set(ocnt, 0);
10842+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10843+ atomic_set_unchecked(ocnt, 0);
10844 wmb();
10845 printk("%s: Queue overflowed %d times.\n",
10846 pfx, cnt);
10847@@ -2046,7 +2057,7 @@ out:
10848 */
10849 void sun4v_resum_overflow(struct pt_regs *regs)
10850 {
10851- atomic_inc(&sun4v_resum_oflow_cnt);
10852+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10853 }
10854
10855 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10856@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10857 /* XXX Actually even this can make not that much sense. Perhaps
10858 * XXX we should just pull the plug and panic directly from here?
10859 */
10860- atomic_inc(&sun4v_nonresum_oflow_cnt);
10861+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10862 }
10863
10864 unsigned long sun4v_err_itlb_vaddr;
10865@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10866
10867 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10868 regs->tpc, tl);
10869- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10870+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10871 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10872- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10873+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10874 (void *) regs->u_regs[UREG_I7]);
10875 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10876 "pte[%lx] error[%lx]\n",
10877@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10878
10879 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10880 regs->tpc, tl);
10881- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10882+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10883 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10884- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10885+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10886 (void *) regs->u_regs[UREG_I7]);
10887 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10888 "pte[%lx] error[%lx]\n",
10889@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10890 fp = (unsigned long)sf->fp + STACK_BIAS;
10891 }
10892
10893- printk(" [%016lx] %pS\n", pc, (void *) pc);
10894+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10895 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10896 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10897 int index = tsk->curr_ret_stack;
10898 if (tsk->ret_stack && index >= graph) {
10899 pc = tsk->ret_stack[index - graph].ret;
10900- printk(" [%016lx] %pS\n", pc, (void *) pc);
10901+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10902 graph++;
10903 }
10904 }
10905@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10906 return (struct reg_window *) (fp + STACK_BIAS);
10907 }
10908
10909+extern void gr_handle_kernel_exploit(void);
10910+
10911 void die_if_kernel(char *str, struct pt_regs *regs)
10912 {
10913 static int die_counter;
10914@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10915 while (rw &&
10916 count++ < 30 &&
10917 kstack_valid(tp, (unsigned long) rw)) {
10918- printk("Caller[%016lx]: %pS\n", rw->ins[7],
10919+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
10920 (void *) rw->ins[7]);
10921
10922 rw = kernel_stack_up(rw);
10923@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10924 }
10925 user_instruction_dump ((unsigned int __user *) regs->tpc);
10926 }
10927- if (regs->tstate & TSTATE_PRIV)
10928+ if (regs->tstate & TSTATE_PRIV) {
10929+ gr_handle_kernel_exploit();
10930 do_exit(SIGKILL);
10931+ }
10932 do_exit(SIGSEGV);
10933 }
10934 EXPORT_SYMBOL(die_if_kernel);
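
Note: these bad_trap()/bad_trap_tl1() hunks are the receiving end of the "tvs %icc/%xcc, 6" instructions sprinkled through the rwlock and atomic code: a privileged software trap 6 is interpreted as a PAX_REFCOUNT overflow and reported via pax_report_refcount_overflow() before the usual die path runs. The remaining hunks are the %pS -> %pA printk swap, the atomic_unchecked_t conversion of the sun4v error-queue overflow statistics, and the gr_handle_kernel_exploit() response added to die_if_kernel().
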
10935diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10936index 3c1a7cb..9046547 100644
10937--- a/arch/sparc/kernel/unaligned_64.c
10938+++ b/arch/sparc/kernel/unaligned_64.c
10939@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
10940 unsigned long compute_effective_address(struct pt_regs *regs,
10941 unsigned int insn, unsigned int rd)
10942 {
10943+ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
10944 unsigned int rs1 = (insn >> 14) & 0x1f;
10945 unsigned int rs2 = insn & 0x1f;
10946- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
10947+ unsigned long addr;
10948
10949 if (insn & 0x2000) {
10950 maybe_flush_windows(rs1, 0, rd, from_kernel);
10951- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
10952+ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
10953 } else {
10954 maybe_flush_windows(rs1, rs2, rd, from_kernel);
10955- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
10956+ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
10957 }
10958+
10959+ if (!from_kernel && test_thread_flag(TIF_32BIT))
10960+ addr &= 0xffffffff;
10961+
10962+ return addr;
10963 }
10964
10965 /* This is just to make gcc think die_if_kernel does return... */
10966@@ -289,7 +295,7 @@ static void log_unaligned(struct pt_regs *regs)
10967 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10968
10969 if (__ratelimit(&ratelimit)) {
10970- printk("Kernel unaligned access at TPC[%lx] %pS\n",
10971+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
10972 regs->tpc, (void *) regs->tpc);
10973 }
10974 }
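
Note: the compute_effective_address() change is a correctness fix with security relevance: a 32-bit task's registers can hold sign-extended 64-bit values, so the summed address must be truncated to 32 bits before the unaligned-access handler acts on it, matching what the hardware enforces for a 32-bit process. The essence, as a sketch:

static unsigned long effective_address_sketch(unsigned long rs1_val,
					      long simm13_or_rs2,
					      int from_kernel,
					      int task_is_32bit)
{
	unsigned long addr = rs1_val + simm13_or_rs2;

	/* a 32-bit task must never see an effective address above 4GB */
	if (!from_kernel && task_is_32bit)
		addr &= 0xffffffffUL;
	return addr;
}
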
10975diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10976index dbe119b..089c7c1 100644
10977--- a/arch/sparc/lib/Makefile
10978+++ b/arch/sparc/lib/Makefile
10979@@ -2,7 +2,7 @@
10980 #
10981
10982 asflags-y := -ansi -DST_DIV0=0x02
10983-ccflags-y := -Werror
10984+#ccflags-y := -Werror
10985
10986 lib-$(CONFIG_SPARC32) += ashrdi3.o
10987 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10988diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10989index 85c233d..68500e0 100644
10990--- a/arch/sparc/lib/atomic_64.S
10991+++ b/arch/sparc/lib/atomic_64.S
10992@@ -17,7 +17,12 @@
10993 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10994 BACKOFF_SETUP(%o2)
10995 1: lduw [%o1], %g1
10996- add %g1, %o0, %g7
10997+ addcc %g1, %o0, %g7
10998+
10999+#ifdef CONFIG_PAX_REFCOUNT
11000+ tvs %icc, 6
11001+#endif
11002+
11003 cas [%o1], %g1, %g7
11004 cmp %g1, %g7
11005 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11006@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11007 2: BACKOFF_SPIN(%o2, %o3, 1b)
11008 ENDPROC(atomic_add)
11009
11010+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11011+ BACKOFF_SETUP(%o2)
11012+1: lduw [%o1], %g1
11013+ add %g1, %o0, %g7
11014+ cas [%o1], %g1, %g7
11015+ cmp %g1, %g7
11016+ bne,pn %icc, 2f
11017+ nop
11018+ retl
11019+ nop
11020+2: BACKOFF_SPIN(%o2, %o3, 1b)
11021+ENDPROC(atomic_add_unchecked)
11022+
11023 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11024 BACKOFF_SETUP(%o2)
11025 1: lduw [%o1], %g1
11026- sub %g1, %o0, %g7
11027+ subcc %g1, %o0, %g7
11028+
11029+#ifdef CONFIG_PAX_REFCOUNT
11030+ tvs %icc, 6
11031+#endif
11032+
11033 cas [%o1], %g1, %g7
11034 cmp %g1, %g7
11035 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11036@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11037 2: BACKOFF_SPIN(%o2, %o3, 1b)
11038 ENDPROC(atomic_sub)
11039
11040+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11041+ BACKOFF_SETUP(%o2)
11042+1: lduw [%o1], %g1
11043+ sub %g1, %o0, %g7
11044+ cas [%o1], %g1, %g7
11045+ cmp %g1, %g7
11046+ bne,pn %icc, 2f
11047+ nop
11048+ retl
11049+ nop
11050+2: BACKOFF_SPIN(%o2, %o3, 1b)
11051+ENDPROC(atomic_sub_unchecked)
11052+
11053 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11054 BACKOFF_SETUP(%o2)
11055 1: lduw [%o1], %g1
11056- add %g1, %o0, %g7
11057+ addcc %g1, %o0, %g7
11058+
11059+#ifdef CONFIG_PAX_REFCOUNT
11060+ tvs %icc, 6
11061+#endif
11062+
11063 cas [%o1], %g1, %g7
11064 cmp %g1, %g7
11065 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11066@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11067 2: BACKOFF_SPIN(%o2, %o3, 1b)
11068 ENDPROC(atomic_add_ret)
11069
11070+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11071+ BACKOFF_SETUP(%o2)
11072+1: lduw [%o1], %g1
11073+ addcc %g1, %o0, %g7
11074+ cas [%o1], %g1, %g7
11075+ cmp %g1, %g7
11076+ bne,pn %icc, 2f
11077+ add %g7, %o0, %g7
11078+ sra %g7, 0, %o0
11079+ retl
11080+ nop
11081+2: BACKOFF_SPIN(%o2, %o3, 1b)
11082+ENDPROC(atomic_add_ret_unchecked)
11083+
11084 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11085 BACKOFF_SETUP(%o2)
11086 1: lduw [%o1], %g1
11087- sub %g1, %o0, %g7
11088+ subcc %g1, %o0, %g7
11089+
11090+#ifdef CONFIG_PAX_REFCOUNT
11091+ tvs %icc, 6
11092+#endif
11093+
11094 cas [%o1], %g1, %g7
11095 cmp %g1, %g7
11096 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11097@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11098 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11099 BACKOFF_SETUP(%o2)
11100 1: ldx [%o1], %g1
11101- add %g1, %o0, %g7
11102+ addcc %g1, %o0, %g7
11103+
11104+#ifdef CONFIG_PAX_REFCOUNT
11105+ tvs %xcc, 6
11106+#endif
11107+
11108 casx [%o1], %g1, %g7
11109 cmp %g1, %g7
11110 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11111@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11112 2: BACKOFF_SPIN(%o2, %o3, 1b)
11113 ENDPROC(atomic64_add)
11114
11115+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11116+ BACKOFF_SETUP(%o2)
11117+1: ldx [%o1], %g1
11118+ addcc %g1, %o0, %g7
11119+ casx [%o1], %g1, %g7
11120+ cmp %g1, %g7
11121+ bne,pn %xcc, 2f
11122+ nop
11123+ retl
11124+ nop
11125+2: BACKOFF_SPIN(%o2, %o3, 1b)
11126+ENDPROC(atomic64_add_unchecked)
11127+
11128 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11129 BACKOFF_SETUP(%o2)
11130 1: ldx [%o1], %g1
11131- sub %g1, %o0, %g7
11132+ subcc %g1, %o0, %g7
11133+
11134+#ifdef CONFIG_PAX_REFCOUNT
11135+ tvs %xcc, 6
11136+#endif
11137+
11138 casx [%o1], %g1, %g7
11139 cmp %g1, %g7
11140 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11141@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11142 2: BACKOFF_SPIN(%o2, %o3, 1b)
11143 ENDPROC(atomic64_sub)
11144
11145+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11146+ BACKOFF_SETUP(%o2)
11147+1: ldx [%o1], %g1
11148+ subcc %g1, %o0, %g7
11149+ casx [%o1], %g1, %g7
11150+ cmp %g1, %g7
11151+ bne,pn %xcc, 2f
11152+ nop
11153+ retl
11154+ nop
11155+2: BACKOFF_SPIN(%o2, %o3, 1b)
11156+ENDPROC(atomic64_sub_unchecked)
11157+
11158 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11159 BACKOFF_SETUP(%o2)
11160 1: ldx [%o1], %g1
11161- add %g1, %o0, %g7
11162+ addcc %g1, %o0, %g7
11163+
11164+#ifdef CONFIG_PAX_REFCOUNT
11165+ tvs %xcc, 6
11166+#endif
11167+
11168 casx [%o1], %g1, %g7
11169 cmp %g1, %g7
11170 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11171@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11172 2: BACKOFF_SPIN(%o2, %o3, 1b)
11173 ENDPROC(atomic64_add_ret)
11174
11175+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11176+ BACKOFF_SETUP(%o2)
11177+1: ldx [%o1], %g1
11178+ addcc %g1, %o0, %g7
11179+ casx [%o1], %g1, %g7
11180+ cmp %g1, %g7
11181+ bne,pn %xcc, 2f
11182+ add %g7, %o0, %g7
11183+ mov %g7, %o0
11184+ retl
11185+ nop
11186+2: BACKOFF_SPIN(%o2, %o3, 1b)
11187+ENDPROC(atomic64_add_ret_unchecked)
11188+
11189 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11190 BACKOFF_SETUP(%o2)
11191 1: ldx [%o1], %g1
11192- sub %g1, %o0, %g7
11193+ subcc %g1, %o0, %g7
11194+
11195+#ifdef CONFIG_PAX_REFCOUNT
11196+ tvs %xcc, 6
11197+#endif
11198+
11199 casx [%o1], %g1, %g7
11200 cmp %g1, %g7
11201 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
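
Note: the file now carries two parallel families. The stock entry points become overflow-checked (addcc/subcc plus the conditional tvs trap, %icc for 32-bit and %xcc for 64-bit values; see the C model after the spinlock_64.h hunks above), while the new *_unchecked twins keep wrapping arithmetic, with no trap, for callers that hold deliberate wraparound counters. ksyms.c below exports the new symbols so modules can reach them.
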
11202diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11203index 323335b..ed85ea2 100644
11204--- a/arch/sparc/lib/ksyms.c
11205+++ b/arch/sparc/lib/ksyms.c
11206@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11207
11208 /* Atomic counter implementation. */
11209 EXPORT_SYMBOL(atomic_add);
11210+EXPORT_SYMBOL(atomic_add_unchecked);
11211 EXPORT_SYMBOL(atomic_add_ret);
11212+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11213 EXPORT_SYMBOL(atomic_sub);
11214+EXPORT_SYMBOL(atomic_sub_unchecked);
11215 EXPORT_SYMBOL(atomic_sub_ret);
11216 EXPORT_SYMBOL(atomic64_add);
11217+EXPORT_SYMBOL(atomic64_add_unchecked);
11218 EXPORT_SYMBOL(atomic64_add_ret);
11219+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11220 EXPORT_SYMBOL(atomic64_sub);
11221+EXPORT_SYMBOL(atomic64_sub_unchecked);
11222 EXPORT_SYMBOL(atomic64_sub_ret);
11223 EXPORT_SYMBOL(atomic64_dec_if_positive);
11224
11225diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11226index 30c3ecc..736f015 100644
11227--- a/arch/sparc/mm/Makefile
11228+++ b/arch/sparc/mm/Makefile
11229@@ -2,7 +2,7 @@
11230 #
11231
11232 asflags-y := -ansi
11233-ccflags-y := -Werror
11234+#ccflags-y := -Werror
11235
11236 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11237 obj-y += fault_$(BITS).o
11238diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11239index 59dbd46..1dd7f5e 100644
11240--- a/arch/sparc/mm/fault_32.c
11241+++ b/arch/sparc/mm/fault_32.c
11242@@ -21,6 +21,9 @@
11243 #include <linux/perf_event.h>
11244 #include <linux/interrupt.h>
11245 #include <linux/kdebug.h>
11246+#include <linux/slab.h>
11247+#include <linux/pagemap.h>
11248+#include <linux/compiler.h>
11249
11250 #include <asm/page.h>
11251 #include <asm/pgtable.h>
11252@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11253 return safe_compute_effective_address(regs, insn);
11254 }
11255
11256+#ifdef CONFIG_PAX_PAGEEXEC
11257+#ifdef CONFIG_PAX_DLRESOLVE
11258+static void pax_emuplt_close(struct vm_area_struct *vma)
11259+{
11260+ vma->vm_mm->call_dl_resolve = 0UL;
11261+}
11262+
11263+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11264+{
11265+ unsigned int *kaddr;
11266+
11267+ vmf->page = alloc_page(GFP_HIGHUSER);
11268+ if (!vmf->page)
11269+ return VM_FAULT_OOM;
11270+
11271+ kaddr = kmap(vmf->page);
11272+ memset(kaddr, 0, PAGE_SIZE);
11273+ kaddr[0] = 0x9DE3BFA8U; /* save */
11274+ flush_dcache_page(vmf->page);
11275+ kunmap(vmf->page);
11276+ return VM_FAULT_MAJOR;
11277+}
11278+
11279+static const struct vm_operations_struct pax_vm_ops = {
11280+ .close = pax_emuplt_close,
11281+ .fault = pax_emuplt_fault
11282+};
11283+
11284+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11285+{
11286+ int ret;
11287+
11288+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11289+ vma->vm_mm = current->mm;
11290+ vma->vm_start = addr;
11291+ vma->vm_end = addr + PAGE_SIZE;
11292+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11293+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11294+ vma->vm_ops = &pax_vm_ops;
11295+
11296+ ret = insert_vm_struct(current->mm, vma);
11297+ if (ret)
11298+ return ret;
11299+
11300+ ++current->mm->total_vm;
11301+ return 0;
11302+}
11303+#endif
11304+
11305+/*
11306+ * PaX: decide what to do with offenders (regs->pc = fault address)
11307+ *
11308+ * returns 1 when task should be killed
11309+ * 2 when patched PLT trampoline was detected
11310+ * 3 when unpatched PLT trampoline was detected
11311+ */
11312+static int pax_handle_fetch_fault(struct pt_regs *regs)
11313+{
11314+
11315+#ifdef CONFIG_PAX_EMUPLT
11316+ int err;
11317+
11318+ do { /* PaX: patched PLT emulation #1 */
11319+ unsigned int sethi1, sethi2, jmpl;
11320+
11321+ err = get_user(sethi1, (unsigned int *)regs->pc);
11322+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11323+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11324+
11325+ if (err)
11326+ break;
11327+
11328+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11329+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11330+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11331+ {
11332+ unsigned int addr;
11333+
11334+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11335+ addr = regs->u_regs[UREG_G1];
11336+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11337+ regs->pc = addr;
11338+ regs->npc = addr+4;
11339+ return 2;
11340+ }
11341+ } while (0);
11342+
11343+ do { /* PaX: patched PLT emulation #2 */
11344+ unsigned int ba;
11345+
11346+ err = get_user(ba, (unsigned int *)regs->pc);
11347+
11348+ if (err)
11349+ break;
11350+
11351+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11352+ unsigned int addr;
11353+
11354+ if ((ba & 0xFFC00000U) == 0x30800000U)
11355+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11356+ else
11357+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11358+ regs->pc = addr;
11359+ regs->npc = addr+4;
11360+ return 2;
11361+ }
11362+ } while (0);
11363+
11364+ do { /* PaX: patched PLT emulation #3 */
11365+ unsigned int sethi, bajmpl, nop;
11366+
11367+ err = get_user(sethi, (unsigned int *)regs->pc);
11368+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11369+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11370+
11371+ if (err)
11372+ break;
11373+
11374+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11375+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11376+ nop == 0x01000000U)
11377+ {
11378+ unsigned int addr;
11379+
11380+ addr = (sethi & 0x003FFFFFU) << 10;
11381+ regs->u_regs[UREG_G1] = addr;
11382+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11383+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11384+ else
11385+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11386+ regs->pc = addr;
11387+ regs->npc = addr+4;
11388+ return 2;
11389+ }
11390+ } while (0);
11391+
11392+ do { /* PaX: unpatched PLT emulation step 1 */
11393+ unsigned int sethi, ba, nop;
11394+
11395+ err = get_user(sethi, (unsigned int *)regs->pc);
11396+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11397+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11398+
11399+ if (err)
11400+ break;
11401+
11402+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11403+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11404+ nop == 0x01000000U)
11405+ {
11406+ unsigned int addr, save, call;
11407+
11408+ if ((ba & 0xFFC00000U) == 0x30800000U)
11409+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11410+ else
11411+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11412+
11413+ err = get_user(save, (unsigned int *)addr);
11414+ err |= get_user(call, (unsigned int *)(addr+4));
11415+ err |= get_user(nop, (unsigned int *)(addr+8));
11416+ if (err)
11417+ break;
11418+
11419+#ifdef CONFIG_PAX_DLRESOLVE
11420+ if (save == 0x9DE3BFA8U &&
11421+ (call & 0xC0000000U) == 0x40000000U &&
11422+ nop == 0x01000000U)
11423+ {
11424+ struct vm_area_struct *vma;
11425+ unsigned long call_dl_resolve;
11426+
11427+ down_read(&current->mm->mmap_sem);
11428+ call_dl_resolve = current->mm->call_dl_resolve;
11429+ up_read(&current->mm->mmap_sem);
11430+ if (likely(call_dl_resolve))
11431+ goto emulate;
11432+
11433+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11434+
11435+ down_write(&current->mm->mmap_sem);
11436+ if (current->mm->call_dl_resolve) {
11437+ call_dl_resolve = current->mm->call_dl_resolve;
11438+ up_write(&current->mm->mmap_sem);
11439+ if (vma)
11440+ kmem_cache_free(vm_area_cachep, vma);
11441+ goto emulate;
11442+ }
11443+
11444+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11445+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11446+ up_write(&current->mm->mmap_sem);
11447+ if (vma)
11448+ kmem_cache_free(vm_area_cachep, vma);
11449+ return 1;
11450+ }
11451+
11452+ if (pax_insert_vma(vma, call_dl_resolve)) {
11453+ up_write(&current->mm->mmap_sem);
11454+ kmem_cache_free(vm_area_cachep, vma);
11455+ return 1;
11456+ }
11457+
11458+ current->mm->call_dl_resolve = call_dl_resolve;
11459+ up_write(&current->mm->mmap_sem);
11460+
11461+emulate:
11462+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11463+ regs->pc = call_dl_resolve;
11464+ regs->npc = addr+4;
11465+ return 3;
11466+ }
11467+#endif
11468+
11469+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11470+ if ((save & 0xFFC00000U) == 0x05000000U &&
11471+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11472+ nop == 0x01000000U)
11473+ {
11474+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11475+ regs->u_regs[UREG_G2] = addr + 4;
11476+ addr = (save & 0x003FFFFFU) << 10;
11477+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11478+ regs->pc = addr;
11479+ regs->npc = addr+4;
11480+ return 3;
11481+ }
11482+ }
11483+ } while (0);
11484+
11485+ do { /* PaX: unpatched PLT emulation step 2 */
11486+ unsigned int save, call, nop;
11487+
11488+ err = get_user(save, (unsigned int *)(regs->pc-4));
11489+ err |= get_user(call, (unsigned int *)regs->pc);
11490+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11491+ if (err)
11492+ break;
11493+
11494+ if (save == 0x9DE3BFA8U &&
11495+ (call & 0xC0000000U) == 0x40000000U &&
11496+ nop == 0x01000000U)
11497+ {
11498+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11499+
11500+ regs->u_regs[UREG_RETPC] = regs->pc;
11501+ regs->pc = dl_resolve;
11502+ regs->npc = dl_resolve+4;
11503+ return 3;
11504+ }
11505+ } while (0);
11506+#endif
11507+
11508+ return 1;
11509+}
11510+
11511+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11512+{
11513+ unsigned long i;
11514+
11515+ printk(KERN_ERR "PAX: bytes at PC: ");
11516+ for (i = 0; i < 8; i++) {
11517+ unsigned int c;
11518+ if (get_user(c, (unsigned int *)pc+i))
11519+ printk(KERN_CONT "???????? ");
11520+ else
11521+ printk(KERN_CONT "%08x ", c);
11522+ }
11523+ printk("\n");
11524+}
11525+#endif
11526+
11527 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11528 int text_fault)
11529 {
11530@@ -229,6 +503,24 @@ good_area:
11531 if (!(vma->vm_flags & VM_WRITE))
11532 goto bad_area;
11533 } else {
11534+
11535+#ifdef CONFIG_PAX_PAGEEXEC
11536+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11537+ up_read(&mm->mmap_sem);
11538+ switch (pax_handle_fetch_fault(regs)) {
11539+
11540+#ifdef CONFIG_PAX_EMUPLT
11541+ case 2:
11542+ case 3:
11543+ return;
11544+#endif
11545+
11546+ }
11547+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11548+ do_group_exit(SIGKILL);
11549+ }
11550+#endif
11551+
11552 /* Allow reads even for write-only mappings */
11553 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11554 goto bad_area;
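
Note: pax_handle_fetch_fault() implements PAX_EMUPLT: with PAGEEXEC making the PLT non-executable, an instruction-fetch fault in it is decoded by hand and, if the words form a known PLT trampoline, emulated instead of killing the task (return 2 for a patched, 3 for an unpatched trampoline; 1 kills). The magic constants are plain SPARC opcode matches; for instance the sethi/jmpl pieces of pattern #1 above decode as in this sketch:

#include <stdint.h>

/* sethi %hi(imm22), %g1  -> op=0, rd=%g1, op2=0b100 */
static int is_sethi_g1(uint32_t insn)
{
	return (insn & 0xFFC00000U) == 0x03000000U;
}

/* jmpl %g1 + simm13, %g0 -> op=2, rd=%g0, op3=jmpl, rs1=%g1, i=1 */
static int is_jmpl_g1(uint32_t insn)
{
	return (insn & 0xFFFFE000U) == 0x81C06000U;
}

static uint32_t sethi_imm(uint32_t insn)
{
	return (insn & 0x003FFFFFU) << 10;	/* %hi() fills bits 31..10 */
}

static int32_t jmpl_simm13(uint32_t insn)
{
	/* conventional equivalent of the ((x | ~0x1FFF) ^ 0x1000) + 0x1000
	 * sign-extension idiom used in the hunks above */
	return (int32_t)(insn << 19) >> 19;
}

/* target of "sethi %hi(addr), %g1; jmpl %g1 + %lo(addr), %g0" is then
 * sethi_imm(sethi) + jmpl_simm13(jmpl), as computed in the handler. */
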
11555diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11556index 69bb818..3542236 100644
11557--- a/arch/sparc/mm/fault_64.c
11558+++ b/arch/sparc/mm/fault_64.c
11559@@ -22,6 +22,9 @@
11560 #include <linux/kdebug.h>
11561 #include <linux/percpu.h>
11562 #include <linux/context_tracking.h>
11563+#include <linux/slab.h>
11564+#include <linux/pagemap.h>
11565+#include <linux/compiler.h>
11566
11567 #include <asm/page.h>
11568 #include <asm/pgtable.h>
11569@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11570 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11571 regs->tpc);
11572 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11573- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11574+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11575 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11576 dump_stack();
11577 unhandled_fault(regs->tpc, current, regs);
11578@@ -96,38 +99,51 @@ static unsigned int get_user_insn(unsigned long tpc)
11579 pte_t *ptep, pte;
11580 unsigned long pa;
11581 u32 insn = 0;
11582- unsigned long pstate;
11583
11584- if (pgd_none(*pgdp))
11585- goto outret;
11586+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
11587+ goto out;
11588 pudp = pud_offset(pgdp, tpc);
11589- if (pud_none(*pudp))
11590- goto outret;
11591- pmdp = pmd_offset(pudp, tpc);
11592- if (pmd_none(*pmdp))
11593- goto outret;
11594+ if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
11595+ goto out;
11596
11597 /* This disables preemption for us as well. */
11598- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
11599- __asm__ __volatile__("wrpr %0, %1, %%pstate"
11600- : : "r" (pstate), "i" (PSTATE_IE));
11601- ptep = pte_offset_map(pmdp, tpc);
11602- pte = *ptep;
11603- if (!pte_present(pte))
11604- goto out;
11605+ local_irq_disable();
11606
11607- pa = (pte_pfn(pte) << PAGE_SHIFT);
11608- pa += (tpc & ~PAGE_MASK);
11609+ pmdp = pmd_offset(pudp, tpc);
11610+ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
11611+ goto out_irq_enable;
11612
11613- /* Use phys bypass so we don't pollute dtlb/dcache. */
11614- __asm__ __volatile__("lduwa [%1] %2, %0"
11615- : "=r" (insn)
11616- : "r" (pa), "i" (ASI_PHYS_USE_EC));
11617+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
11618+ if (pmd_trans_huge(*pmdp)) {
11619+ if (pmd_trans_splitting(*pmdp))
11620+ goto out_irq_enable;
11621
11622+ pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
11623+ pa += tpc & ~HPAGE_MASK;
11624+
11625+ /* Use phys bypass so we don't pollute dtlb/dcache. */
11626+ __asm__ __volatile__("lduwa [%1] %2, %0"
11627+ : "=r" (insn)
11628+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
11629+ } else
11630+#endif
11631+ {
11632+ ptep = pte_offset_map(pmdp, tpc);
11633+ pte = *ptep;
11634+ if (pte_present(pte)) {
11635+ pa = (pte_pfn(pte) << PAGE_SHIFT);
11636+ pa += (tpc & ~PAGE_MASK);
11637+
11638+ /* Use phys bypass so we don't pollute dtlb/dcache. */
11639+ __asm__ __volatile__("lduwa [%1] %2, %0"
11640+ : "=r" (insn)
11641+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
11642+ }
11643+ pte_unmap(ptep);
11644+ }
11645+out_irq_enable:
11646+ local_irq_enable();
11647 out:
11648- pte_unmap(ptep);
11649- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
11650-outret:
11651 return insn;
11652 }
11653
11654@@ -153,7 +169,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
11655 }
11656
11657 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11658- unsigned int insn, int fault_code)
11659+ unsigned long fault_addr, unsigned int insn,
11660+ int fault_code)
11661 {
11662 unsigned long addr;
11663 siginfo_t info;
11664@@ -161,10 +178,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11665 info.si_code = code;
11666 info.si_signo = sig;
11667 info.si_errno = 0;
11668- if (fault_code & FAULT_CODE_ITLB)
11669+ if (fault_code & FAULT_CODE_ITLB) {
11670 addr = regs->tpc;
11671- else
11672- addr = compute_effective_address(regs, insn, 0);
11673+ } else {
11674+ /* If we were able to probe the faulting instruction, use it
11675+ * to compute a precise fault address. Otherwise use the
11676+ * fault-time-provided address, which may only have page granularity.
11677+ */
11678+ if (insn)
11679+ addr = compute_effective_address(regs, insn, 0);
11680+ else
11681+ addr = fault_addr;
11682+ }
11683 info.si_addr = (void __user *) addr;
11684 info.si_trapno = 0;
11685
11686@@ -239,7 +264,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
11687 /* The si_code was set to make clear whether
11688 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
11689 */
11690- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
11691+ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
11692 return;
11693 }
11694
11695@@ -271,6 +296,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
11696 show_regs(regs);
11697 }
11698
11699+#ifdef CONFIG_PAX_PAGEEXEC
11700+#ifdef CONFIG_PAX_DLRESOLVE
11701+static void pax_emuplt_close(struct vm_area_struct *vma)
11702+{
11703+ vma->vm_mm->call_dl_resolve = 0UL;
11704+}
11705+
11706+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11707+{
11708+ unsigned int *kaddr;
11709+
11710+ vmf->page = alloc_page(GFP_HIGHUSER);
11711+ if (!vmf->page)
11712+ return VM_FAULT_OOM;
11713+
11714+ kaddr = kmap(vmf->page);
11715+ memset(kaddr, 0, PAGE_SIZE);
11716+ kaddr[0] = 0x9DE3BFA8U; /* save */
11717+ flush_dcache_page(vmf->page);
11718+ kunmap(vmf->page);
11719+ return VM_FAULT_MAJOR;
11720+}
11721+
11722+static const struct vm_operations_struct pax_vm_ops = {
11723+ .close = pax_emuplt_close,
11724+ .fault = pax_emuplt_fault
11725+};
11726+
11727+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11728+{
11729+ int ret;
11730+
11731+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11732+ vma->vm_mm = current->mm;
11733+ vma->vm_start = addr;
11734+ vma->vm_end = addr + PAGE_SIZE;
11735+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11736+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11737+ vma->vm_ops = &pax_vm_ops;
11738+
11739+ ret = insert_vm_struct(current->mm, vma);
11740+ if (ret)
11741+ return ret;
11742+
11743+ ++current->mm->total_vm;
11744+ return 0;
11745+}
11746+#endif
11747+
11748+/*
11749+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11750+ *
11751+ * returns 1 when task should be killed
11752+ * 2 when patched PLT trampoline was detected
11753+ * 3 when unpatched PLT trampoline was detected
11754+ */
11755+static int pax_handle_fetch_fault(struct pt_regs *regs)
11756+{
11757+
11758+#ifdef CONFIG_PAX_EMUPLT
11759+ int err;
11760+
11761+ do { /* PaX: patched PLT emulation #1 */
11762+ unsigned int sethi1, sethi2, jmpl;
11763+
11764+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11765+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11766+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11767+
11768+ if (err)
11769+ break;
11770+
11771+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11772+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11773+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11774+ {
11775+ unsigned long addr;
11776+
11777+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11778+ addr = regs->u_regs[UREG_G1];
11779+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11780+
11781+ if (test_thread_flag(TIF_32BIT))
11782+ addr &= 0xFFFFFFFFUL;
11783+
11784+ regs->tpc = addr;
11785+ regs->tnpc = addr+4;
11786+ return 2;
11787+ }
11788+ } while (0);
11789+
11790+ do { /* PaX: patched PLT emulation #2 */
11791+ unsigned int ba;
11792+
11793+ err = get_user(ba, (unsigned int *)regs->tpc);
11794+
11795+ if (err)
11796+ break;
11797+
11798+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11799+ unsigned long addr;
11800+
11801+ if ((ba & 0xFFC00000U) == 0x30800000U)
11802+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11803+ else
11804+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11805+
11806+ if (test_thread_flag(TIF_32BIT))
11807+ addr &= 0xFFFFFFFFUL;
11808+
11809+ regs->tpc = addr;
11810+ regs->tnpc = addr+4;
11811+ return 2;
11812+ }
11813+ } while (0);
11814+
11815+ do { /* PaX: patched PLT emulation #3 */
11816+ unsigned int sethi, bajmpl, nop;
11817+
11818+ err = get_user(sethi, (unsigned int *)regs->tpc);
11819+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11820+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11821+
11822+ if (err)
11823+ break;
11824+
11825+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11826+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11827+ nop == 0x01000000U)
11828+ {
11829+ unsigned long addr;
11830+
11831+ addr = (sethi & 0x003FFFFFU) << 10;
11832+ regs->u_regs[UREG_G1] = addr;
11833+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11834+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11835+ else
11836+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11837+
11838+ if (test_thread_flag(TIF_32BIT))
11839+ addr &= 0xFFFFFFFFUL;
11840+
11841+ regs->tpc = addr;
11842+ regs->tnpc = addr+4;
11843+ return 2;
11844+ }
11845+ } while (0);
11846+
11847+ do { /* PaX: patched PLT emulation #4 */
11848+ unsigned int sethi, mov1, call, mov2;
11849+
11850+ err = get_user(sethi, (unsigned int *)regs->tpc);
11851+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11852+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11853+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11854+
11855+ if (err)
11856+ break;
11857+
11858+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11859+ mov1 == 0x8210000FU &&
11860+ (call & 0xC0000000U) == 0x40000000U &&
11861+ mov2 == 0x9E100001U)
11862+ {
11863+ unsigned long addr;
11864+
11865+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11866+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11867+
11868+ if (test_thread_flag(TIF_32BIT))
11869+ addr &= 0xFFFFFFFFUL;
11870+
11871+ regs->tpc = addr;
11872+ regs->tnpc = addr+4;
11873+ return 2;
11874+ }
11875+ } while (0);
11876+
11877+ do { /* PaX: patched PLT emulation #5 */
11878+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11879+
11880+ err = get_user(sethi, (unsigned int *)regs->tpc);
11881+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11882+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11883+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11884+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11885+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11886+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11887+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11888+
11889+ if (err)
11890+ break;
11891+
11892+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11893+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11894+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11895+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11896+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11897+ sllx == 0x83287020U &&
11898+ jmpl == 0x81C04005U &&
11899+ nop == 0x01000000U)
11900+ {
11901+ unsigned long addr;
11902+
11903+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11904+ regs->u_regs[UREG_G1] <<= 32;
11905+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11906+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11907+ regs->tpc = addr;
11908+ regs->tnpc = addr+4;
11909+ return 2;
11910+ }
11911+ } while (0);
11912+
11913+ do { /* PaX: patched PLT emulation #6 */
11914+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11915+
11916+ err = get_user(sethi, (unsigned int *)regs->tpc);
11917+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11918+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11919+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11920+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11921+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11922+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11923+
11924+ if (err)
11925+ break;
11926+
11927+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11928+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11929+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11930+ sllx == 0x83287020U &&
11931+ (or & 0xFFFFE000U) == 0x8A116000U &&
11932+ jmpl == 0x81C04005U &&
11933+ nop == 0x01000000U)
11934+ {
11935+ unsigned long addr;
11936+
11937+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11938+ regs->u_regs[UREG_G1] <<= 32;
11939+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11940+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11941+ regs->tpc = addr;
11942+ regs->tnpc = addr+4;
11943+ return 2;
11944+ }
11945+ } while (0);
11946+
11947+ do { /* PaX: unpatched PLT emulation step 1 */
11948+ unsigned int sethi, ba, nop;
11949+
11950+ err = get_user(sethi, (unsigned int *)regs->tpc);
11951+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11952+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11953+
11954+ if (err)
11955+ break;
11956+
11957+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11958+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11959+ nop == 0x01000000U)
11960+ {
11961+ unsigned long addr;
11962+ unsigned int save, call;
11963+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11964+
11965+ if ((ba & 0xFFC00000U) == 0x30800000U)
11966+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11967+ else
11968+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11969+
11970+ if (test_thread_flag(TIF_32BIT))
11971+ addr &= 0xFFFFFFFFUL;
11972+
11973+ err = get_user(save, (unsigned int *)addr);
11974+ err |= get_user(call, (unsigned int *)(addr+4));
11975+ err |= get_user(nop, (unsigned int *)(addr+8));
11976+ if (err)
11977+ break;
11978+
11979+#ifdef CONFIG_PAX_DLRESOLVE
11980+ if (save == 0x9DE3BFA8U &&
11981+ (call & 0xC0000000U) == 0x40000000U &&
11982+ nop == 0x01000000U)
11983+ {
11984+ struct vm_area_struct *vma;
11985+ unsigned long call_dl_resolve;
11986+
11987+ down_read(&current->mm->mmap_sem);
11988+ call_dl_resolve = current->mm->call_dl_resolve;
11989+ up_read(&current->mm->mmap_sem);
11990+ if (likely(call_dl_resolve))
11991+ goto emulate;
11992+
11993+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11994+
11995+ down_write(&current->mm->mmap_sem);
11996+ if (current->mm->call_dl_resolve) {
11997+ call_dl_resolve = current->mm->call_dl_resolve;
11998+ up_write(&current->mm->mmap_sem);
11999+ if (vma)
12000+ kmem_cache_free(vm_area_cachep, vma);
12001+ goto emulate;
12002+ }
12003+
12004+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12005+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12006+ up_write(&current->mm->mmap_sem);
12007+ if (vma)
12008+ kmem_cache_free(vm_area_cachep, vma);
12009+ return 1;
12010+ }
12011+
12012+ if (pax_insert_vma(vma, call_dl_resolve)) {
12013+ up_write(&current->mm->mmap_sem);
12014+ kmem_cache_free(vm_area_cachep, vma);
12015+ return 1;
12016+ }
12017+
12018+ current->mm->call_dl_resolve = call_dl_resolve;
12019+ up_write(&current->mm->mmap_sem);
12020+
12021+emulate:
12022+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12023+ regs->tpc = call_dl_resolve;
12024+ regs->tnpc = addr+4;
12025+ return 3;
12026+ }
12027+#endif
12028+
12029+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12030+ if ((save & 0xFFC00000U) == 0x05000000U &&
12031+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12032+ nop == 0x01000000U)
12033+ {
12034+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12035+ regs->u_regs[UREG_G2] = addr + 4;
12036+ addr = (save & 0x003FFFFFU) << 10;
12037+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12038+
12039+ if (test_thread_flag(TIF_32BIT))
12040+ addr &= 0xFFFFFFFFUL;
12041+
12042+ regs->tpc = addr;
12043+ regs->tnpc = addr+4;
12044+ return 3;
12045+ }
12046+
12047+ /* PaX: 64-bit PLT stub */
12048+ err = get_user(sethi1, (unsigned int *)addr);
12049+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12050+ err |= get_user(or1, (unsigned int *)(addr+8));
12051+ err |= get_user(or2, (unsigned int *)(addr+12));
12052+ err |= get_user(sllx, (unsigned int *)(addr+16));
12053+ err |= get_user(add, (unsigned int *)(addr+20));
12054+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12055+ err |= get_user(nop, (unsigned int *)(addr+28));
12056+ if (err)
12057+ break;
12058+
12059+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12060+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12061+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12062+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12063+ sllx == 0x89293020U &&
12064+ add == 0x8A010005U &&
12065+ jmpl == 0x89C14000U &&
12066+ nop == 0x01000000U)
12067+ {
12068+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12069+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12070+ regs->u_regs[UREG_G4] <<= 32;
12071+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12072+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12073+ regs->u_regs[UREG_G4] = addr + 24;
12074+ addr = regs->u_regs[UREG_G5];
12075+ regs->tpc = addr;
12076+ regs->tnpc = addr+4;
12077+ return 3;
12078+ }
12079+ }
12080+ } while (0);
12081+
12082+#ifdef CONFIG_PAX_DLRESOLVE
12083+ do { /* PaX: unpatched PLT emulation step 2 */
12084+ unsigned int save, call, nop;
12085+
12086+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12087+ err |= get_user(call, (unsigned int *)regs->tpc);
12088+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12089+ if (err)
12090+ break;
12091+
12092+ if (save == 0x9DE3BFA8U &&
12093+ (call & 0xC0000000U) == 0x40000000U &&
12094+ nop == 0x01000000U)
12095+ {
12096+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12097+
12098+ if (test_thread_flag(TIF_32BIT))
12099+ dl_resolve &= 0xFFFFFFFFUL;
12100+
12101+ regs->u_regs[UREG_RETPC] = regs->tpc;
12102+ regs->tpc = dl_resolve;
12103+ regs->tnpc = dl_resolve+4;
12104+ return 3;
12105+ }
12106+ } while (0);
12107+#endif
12108+
12109+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12110+ unsigned int sethi, ba, nop;
12111+
12112+ err = get_user(sethi, (unsigned int *)regs->tpc);
12113+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12114+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12115+
12116+ if (err)
12117+ break;
12118+
12119+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12120+ (ba & 0xFFF00000U) == 0x30600000U &&
12121+ nop == 0x01000000U)
12122+ {
12123+ unsigned long addr;
12124+
12125+ addr = (sethi & 0x003FFFFFU) << 10;
12126+ regs->u_regs[UREG_G1] = addr;
12127+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12128+
12129+ if (test_thread_flag(TIF_32BIT))
12130+ addr &= 0xFFFFFFFFUL;
12131+
12132+ regs->tpc = addr;
12133+ regs->tnpc = addr+4;
12134+ return 2;
12135+ }
12136+ } while (0);
12137+
12138+#endif
12139+
12140+ return 1;
12141+}
12142+
12143+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12144+{
12145+ unsigned long i;
12146+
12147+ printk(KERN_ERR "PAX: bytes at PC: ");
12148+ for (i = 0; i < 8; i++) {
12149+ unsigned int c;
12150+ if (get_user(c, (unsigned int *)pc+i))
12151+ printk(KERN_CONT "???????? ");
12152+ else
12153+ printk(KERN_CONT "%08x ", c);
12154+ }
12155+ printk("\n");
12156+}
12157+#endif
12158+
12159 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12160 {
12161 enum ctx_state prev_state = exception_enter();
12162@@ -344,6 +829,29 @@ retry:
12163 if (!vma)
12164 goto bad_area;
12165
12166+#ifdef CONFIG_PAX_PAGEEXEC
12167+ /* PaX: detect ITLB misses on non-exec pages */
12168+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12169+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12170+ {
12171+ if (address != regs->tpc)
12172+ goto good_area;
12173+
12174+ up_read(&mm->mmap_sem);
12175+ switch (pax_handle_fetch_fault(regs)) {
12176+
12177+#ifdef CONFIG_PAX_EMUPLT
12178+ case 2:
12179+ case 3:
12180+ return;
12181+#endif
12182+
12183+ }
12184+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12185+ do_group_exit(SIGKILL);
12186+ }
12187+#endif
12188+
12189 /* Pure DTLB misses do not tell us whether the fault causing
12190 * load/store/atomic was a write or not, it only says that there
12191 * was no match. So in such a case we (carefully) read the
12192@@ -525,7 +1033,7 @@ do_sigbus:
12193 * Send a sigbus, regardless of whether we were in kernel
12194 * or user mode.
12195 */
12196- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
12197+ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
12198
12199 /* Kernel mode? Handle exceptions or die */
12200 if (regs->tstate & TSTATE_PRIV)
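
All of the PLT-emulation matchers above recover a branch target with the same branch-free idiom: OR in the high bits, XOR the sign bit, then add the sign bit back, i.e. ((disp | ~mask) ^ sign) + sign. A sketch of the equivalent computation for the shifted branch displacements, under a hypothetical helper name (sparc_branch_offset is illustrative only):

	/* Sign-extend the low `bits` of a SPARC branch word and scale the
	 * result from instruction words to bytes; equivalent to the
	 * ((x | ~mask) ^ sign) + sign form used in pax_handle_fetch_fault(). */
	static unsigned long sparc_branch_offset(unsigned int insn, unsigned int bits)
	{
		unsigned long sign = 1UL << (bits - 1);
		unsigned long disp = insn & ((1UL << bits) - 1);

		disp = (disp ^ sign) - sign;	/* sign-extend */
		return disp << 2;		/* word count -> byte offset */
	}

bits is 22 for the 0x30800000-style branches and 19 for the 0x30480000 form; the simm13 jmpl immediates use the same idiom without the final shift.
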
12201diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
12202index c4d3da6..1aed043 100644
12203--- a/arch/sparc/mm/gup.c
12204+++ b/arch/sparc/mm/gup.c
12205@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
12206 struct page *head, *page, *tail;
12207 int refs;
12208
12209- if (!pmd_large(pmd))
12210+ if (!(pmd_val(pmd) & _PAGE_VALID))
12211 return 0;
12212
12213 if (write && !pmd_write(pmd))
12214diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12215index 9bd9ce8..dc84852 100644
12216--- a/arch/sparc/mm/hugetlbpage.c
12217+++ b/arch/sparc/mm/hugetlbpage.c
12218@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12219 unsigned long addr,
12220 unsigned long len,
12221 unsigned long pgoff,
12222- unsigned long flags)
12223+ unsigned long flags,
12224+ unsigned long offset)
12225 {
12226+ struct mm_struct *mm = current->mm;
12227 unsigned long task_size = TASK_SIZE;
12228 struct vm_unmapped_area_info info;
12229
12230@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12231
12232 info.flags = 0;
12233 info.length = len;
12234- info.low_limit = TASK_UNMAPPED_BASE;
12235+ info.low_limit = mm->mmap_base;
12236 info.high_limit = min(task_size, VA_EXCLUDE_START);
12237 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12238 info.align_offset = 0;
12239+ info.threadstack_offset = offset;
12240 addr = vm_unmapped_area(&info);
12241
12242 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12243 VM_BUG_ON(addr != -ENOMEM);
12244 info.low_limit = VA_EXCLUDE_END;
12245+
12246+#ifdef CONFIG_PAX_RANDMMAP
12247+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12248+ info.low_limit += mm->delta_mmap;
12249+#endif
12250+
12251 info.high_limit = task_size;
12252 addr = vm_unmapped_area(&info);
12253 }
12254@@ -55,7 +64,8 @@ static unsigned long
12255 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12256 const unsigned long len,
12257 const unsigned long pgoff,
12258- const unsigned long flags)
12259+ const unsigned long flags,
12260+ const unsigned long offset)
12261 {
12262 struct mm_struct *mm = current->mm;
12263 unsigned long addr = addr0;
12264@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12265 info.high_limit = mm->mmap_base;
12266 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12267 info.align_offset = 0;
12268+ info.threadstack_offset = offset;
12269 addr = vm_unmapped_area(&info);
12270
12271 /*
12272@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12273 VM_BUG_ON(addr != -ENOMEM);
12274 info.flags = 0;
12275 info.low_limit = TASK_UNMAPPED_BASE;
12276+
12277+#ifdef CONFIG_PAX_RANDMMAP
12278+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12279+ info.low_limit += mm->delta_mmap;
12280+#endif
12281+
12282 info.high_limit = STACK_TOP32;
12283 addr = vm_unmapped_area(&info);
12284 }
12285@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12286 struct mm_struct *mm = current->mm;
12287 struct vm_area_struct *vma;
12288 unsigned long task_size = TASK_SIZE;
12289+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12290
12291 if (test_thread_flag(TIF_32BIT))
12292 task_size = STACK_TOP32;
12293@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12294 return addr;
12295 }
12296
12297+#ifdef CONFIG_PAX_RANDMMAP
12298+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12299+#endif
12300+
12301 if (addr) {
12302 addr = ALIGN(addr, HPAGE_SIZE);
12303 vma = find_vma(mm, addr);
12304- if (task_size - len >= addr &&
12305- (!vma || addr + len <= vma->vm_start))
12306+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12307 return addr;
12308 }
12309 if (mm->get_unmapped_area == arch_get_unmapped_area)
12310 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12311- pgoff, flags);
12312+ pgoff, flags, offset);
12313 else
12314 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12315- pgoff, flags);
12316+ pgoff, flags, offset);
12317 }
12318
12319 pte_t *huge_pte_alloc(struct mm_struct *mm,
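
The hugetlb changes above all follow one pattern: each vm_unmapped_area() call site gains a threadstack_offset field, and the PAX_RANDMMAP retry path raises its lower bound by mm->delta_mmap. Condensed into one sketch (threadstack_offset, delta_mmap and check_heap_stack_gap() are PaX additions defined elsewhere in this patch):

	static unsigned long bottomup_sketch(struct mm_struct *mm,
					     unsigned long len, unsigned long offset)
	{
		struct vm_unmapped_area_info info;

		info.flags = 0;
		info.length = len;
		info.low_limit = mm->mmap_base;		/* was TASK_UNMAPPED_BASE */
		info.high_limit = min(TASK_SIZE, VA_EXCLUDE_START);
		info.align_mask = PAGE_MASK & ~HPAGE_MASK;
		info.align_offset = 0;
		info.threadstack_offset = offset;	/* keep a gap from thread stacks */

		return vm_unmapped_area(&info);
	}
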
12320diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12321index eafbc65..5a8070d 100644
12322--- a/arch/sparc/mm/init_64.c
12323+++ b/arch/sparc/mm/init_64.c
12324@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12325 int num_kernel_image_mappings;
12326
12327 #ifdef CONFIG_DEBUG_DCFLUSH
12328-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12329+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12330 #ifdef CONFIG_SMP
12331-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12332+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12333 #endif
12334 #endif
12335
12336@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
12337 {
12338 BUG_ON(tlb_type == hypervisor);
12339 #ifdef CONFIG_DEBUG_DCFLUSH
12340- atomic_inc(&dcpage_flushes);
12341+ atomic_inc_unchecked(&dcpage_flushes);
12342 #endif
12343
12344 #ifdef DCACHE_ALIASING_POSSIBLE
12345@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
12346
12347 #ifdef CONFIG_DEBUG_DCFLUSH
12348 seq_printf(m, "DCPageFlushes\t: %d\n",
12349- atomic_read(&dcpage_flushes));
12350+ atomic_read_unchecked(&dcpage_flushes));
12351 #ifdef CONFIG_SMP
12352 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12353- atomic_read(&dcpage_flushes_xcall));
12354+ atomic_read_unchecked(&dcpage_flushes_xcall));
12355 #endif /* CONFIG_SMP */
12356 #endif /* CONFIG_DEBUG_DCFLUSH */
12357 }
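
dcpage_flushes and dcpage_flushes_xcall are pure statistics and may legitimately wrap, so they move to the PaX atomic_unchecked_t type, which is exempt from REFCOUNT overflow detection. A sketch of the shape of that type (the real definition lives in the arch atomic headers elsewhere in this patch; the __sync builtin here is a stand-in for the arch asm):

	typedef struct {
		int counter;
	} atomic_unchecked_t;		/* atomic_t layout, no overflow trap */

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		__sync_fetch_and_add(&v->counter, 1);
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return *(volatile const int *)&v->counter;
	}
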
12358diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
12359index b12cb5e..b89aba2 100644
12360--- a/arch/sparc/mm/tlb.c
12361+++ b/arch/sparc/mm/tlb.c
12362@@ -134,7 +134,7 @@ no_cache_flush:
12363
12364 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
12365 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
12366- pmd_t pmd, bool exec)
12367+ pmd_t pmd)
12368 {
12369 unsigned long end;
12370 pte_t *pte;
12371@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
12372 pte = pte_offset_map(&pmd, vaddr);
12373 end = vaddr + HPAGE_SIZE;
12374 while (vaddr < end) {
12375- if (pte_val(*pte) & _PAGE_VALID)
12376+ if (pte_val(*pte) & _PAGE_VALID) {
12377+ bool exec = pte_exec(*pte);
12378+
12379 tlb_batch_add_one(mm, vaddr, exec);
12380+ }
12381 pte++;
12382 vaddr += PAGE_SIZE;
12383 }
12384@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
12385 }
12386
12387 if (!pmd_none(orig)) {
12388- pte_t orig_pte = __pte(pmd_val(orig));
12389- bool exec = pte_exec(orig_pte);
12390-
12391 addr &= HPAGE_MASK;
12392 if (pmd_trans_huge(orig)) {
12393+ pte_t orig_pte = __pte(pmd_val(orig));
12394+ bool exec = pte_exec(orig_pte);
12395+
12396 tlb_batch_add_one(mm, addr, exec);
12397 tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
12398 } else {
12399- tlb_batch_pmd_scan(mm, addr, orig, exec);
12400+ tlb_batch_pmd_scan(mm, addr, orig);
12401 }
12402 }
12403 }
12404
12405+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
12406+ pmd_t *pmdp)
12407+{
12408+ pmd_t entry = *pmdp;
12409+
12410+ pmd_val(entry) &= ~_PAGE_VALID;
12411+
12412+ set_pmd_at(vma->vm_mm, address, pmdp, entry);
12413+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
12414+}
12415+
12416 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
12417 pgtable_t pgtable)
12418 {
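
The new pmdp_invalidate() pairs with the gup.c change above: while a huge PMD is being rewritten, _PAGE_VALID is cleared and the range flushed, so a lockless walker that now tests _PAGE_VALID (rather than pmd_large()) backs off instead of following a half-updated entry. A hypothetical caller, to show the intended sequence (thp_rewrite_sketch is illustrative only):

	static void thp_rewrite_sketch(struct vm_area_struct *vma,
				       unsigned long addr, pmd_t *pmdp, pmd_t newpmd)
	{
		pmdp_invalidate(vma, addr, pmdp);	/* drop _PAGE_VALID, flush TLB */
		set_pmd_at(vma->vm_mm, addr, pmdp, newpmd);
	}
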
12419diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12420index b3692ce..e4517c9 100644
12421--- a/arch/tile/Kconfig
12422+++ b/arch/tile/Kconfig
12423@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
12424
12425 config KEXEC
12426 bool "kexec system call"
12427+ depends on !GRKERNSEC_KMEM
12428 ---help---
12429 	  kexec is a system call that implements the ability to shut down your
12430 current kernel, and to start another kernel. It is like a reboot
12431diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12432index ad220ee..2f537b3 100644
12433--- a/arch/tile/include/asm/atomic_64.h
12434+++ b/arch/tile/include/asm/atomic_64.h
12435@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12436
12437 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12438
12439+#define atomic64_read_unchecked(v) atomic64_read(v)
12440+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12441+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12442+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12443+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12444+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12445+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12446+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12447+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12448+
12449 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
12450 #define smp_mb__before_atomic_dec() smp_mb()
12451 #define smp_mb__after_atomic_dec() smp_mb()
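
tile carries no REFCOUNT instrumentation, so the *_unchecked forms are plain aliases and a deliberately wrapping counter compiles to exactly the checked operation. For instance (record_hit is a hypothetical example, not patch code):

	static atomic64_t stats = ATOMIC64_INIT(0);

	static void record_hit(void)
	{
		atomic64_inc_unchecked(&stats);	/* expands to atomic64_inc(&stats) */
	}
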
12452diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12453index 6160761..00cac88 100644
12454--- a/arch/tile/include/asm/cache.h
12455+++ b/arch/tile/include/asm/cache.h
12456@@ -15,11 +15,12 @@
12457 #ifndef _ASM_TILE_CACHE_H
12458 #define _ASM_TILE_CACHE_H
12459
12460+#include <linux/const.h>
12461 #include <arch/chip.h>
12462
12463 /* bytes per L1 data cache line */
12464 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12465-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12467
12468 /* bytes per L2 cache line */
12469 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
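
The same L1_CACHE_BYTES change recurs for um and unicore32 below: the plain int literal becomes _AC(1,UL), which gives the constant unsigned long type in C while remaining usable from assembly. The mechanism, abridged from include/uapi/linux/const.h:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* asm: drop the UL suffix */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: paste it, yielding 1UL */
	#endif

	#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)
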
12470diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12471index b6cde32..c0cb736 100644
12472--- a/arch/tile/include/asm/uaccess.h
12473+++ b/arch/tile/include/asm/uaccess.h
12474@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12475 const void __user *from,
12476 unsigned long n)
12477 {
12478- int sz = __compiletime_object_size(to);
12479+ size_t sz = __compiletime_object_size(to);
12480
12481- if (likely(sz == -1 || sz >= n))
12482+ if (likely(sz == (size_t)-1 || sz >= n))
12483 n = _copy_from_user(to, from, n);
12484 else
12485 copy_from_user_overflow();
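
__compiletime_object_size() reports an unknown destination as -1, and with `int sz` the `sz >= n` test only behaved through implicit conversion against the unsigned count. Declaring sz as size_t and comparing against (size_t)-1 makes the sentinel explicit. The resulting shape, as a sketch rather than the full tile implementation:

	static inline unsigned long copy_from_user_sketch(void *to,
							  const void __user *from,
							  unsigned long n)
	{
		size_t sz = __compiletime_object_size(to);

		if (likely(sz == (size_t)-1 || sz >= n))	/* unknown or big enough */
			return _copy_from_user(to, from, n);
		copy_from_user_overflow();			/* report the overflow */
		return n;
	}
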
12486diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12487index 0cb3bba..7338b2d 100644
12488--- a/arch/tile/mm/hugetlbpage.c
12489+++ b/arch/tile/mm/hugetlbpage.c
12490@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12491 info.high_limit = TASK_SIZE;
12492 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12493 info.align_offset = 0;
12494+ info.threadstack_offset = 0;
12495 return vm_unmapped_area(&info);
12496 }
12497
12498@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12499 info.high_limit = current->mm->mmap_base;
12500 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12501 info.align_offset = 0;
12502+ info.threadstack_offset = 0;
12503 addr = vm_unmapped_area(&info);
12504
12505 /*
12506diff --git a/arch/um/Makefile b/arch/um/Makefile
12507index 36e658a..71a5c5a 100644
12508--- a/arch/um/Makefile
12509+++ b/arch/um/Makefile
12510@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12511 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12512 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12513
12514+ifdef CONSTIFY_PLUGIN
12515+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12516+endif
12517+
12518 #This will adjust *FLAGS accordingly to the platform.
12519 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12520
12521diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12522index 19e1bdd..3665b77 100644
12523--- a/arch/um/include/asm/cache.h
12524+++ b/arch/um/include/asm/cache.h
12525@@ -1,6 +1,7 @@
12526 #ifndef __UM_CACHE_H
12527 #define __UM_CACHE_H
12528
12529+#include <linux/const.h>
12530
12531 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12532 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12533@@ -12,6 +13,6 @@
12534 # define L1_CACHE_SHIFT 5
12535 #endif
12536
12537-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12538+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12539
12540 #endif
12541diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12542index 2e0a6b1..a64d0f5 100644
12543--- a/arch/um/include/asm/kmap_types.h
12544+++ b/arch/um/include/asm/kmap_types.h
12545@@ -8,6 +8,6 @@
12546
12547 /* No more #include "asm/arch/kmap_types.h" ! */
12548
12549-#define KM_TYPE_NR 14
12550+#define KM_TYPE_NR 15
12551
12552 #endif
12553diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12554index 5ff53d9..5850cdf 100644
12555--- a/arch/um/include/asm/page.h
12556+++ b/arch/um/include/asm/page.h
12557@@ -14,6 +14,9 @@
12558 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12559 #define PAGE_MASK (~(PAGE_SIZE-1))
12560
12561+#define ktla_ktva(addr) (addr)
12562+#define ktva_ktla(addr) (addr)
12563+
12564 #ifndef __ASSEMBLY__
12565
12566 struct page;
12567diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12568index 0032f92..cd151e0 100644
12569--- a/arch/um/include/asm/pgtable-3level.h
12570+++ b/arch/um/include/asm/pgtable-3level.h
12571@@ -58,6 +58,7 @@
12572 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12573 #define pud_populate(mm, pud, pmd) \
12574 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12575+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12576
12577 #ifdef CONFIG_64BIT
12578 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12579diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12580index eecc414..48adb87 100644
12581--- a/arch/um/kernel/process.c
12582+++ b/arch/um/kernel/process.c
12583@@ -356,22 +356,6 @@ int singlestepping(void * t)
12584 return 2;
12585 }
12586
12587-/*
12588- * Only x86 and x86_64 have an arch_align_stack().
12589- * All other arches have "#define arch_align_stack(x) (x)"
12590- * in their asm/system.h
12591- * As this is included in UML from asm-um/system-generic.h,
12592- * we can use it to behave as the subarch does.
12593- */
12594-#ifndef arch_align_stack
12595-unsigned long arch_align_stack(unsigned long sp)
12596-{
12597- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12598- sp -= get_random_int() % 8192;
12599- return sp & ~0xf;
12600-}
12601-#endif
12602-
12603 unsigned long get_wchan(struct task_struct *p)
12604 {
12605 unsigned long stack_page, sp, ip;
12606diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12607index ad8f795..2c7eec6 100644
12608--- a/arch/unicore32/include/asm/cache.h
12609+++ b/arch/unicore32/include/asm/cache.h
12610@@ -12,8 +12,10 @@
12611 #ifndef __UNICORE_CACHE_H__
12612 #define __UNICORE_CACHE_H__
12613
12614-#define L1_CACHE_SHIFT (5)
12615-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12616+#include <linux/const.h>
12617+
12618+#define L1_CACHE_SHIFT 5
12619+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12620
12621 /*
12622 * Memory returned by kmalloc() may be used for DMA, so we must make
12623diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12624index 0af5250..59f9597 100644
12625--- a/arch/x86/Kconfig
12626+++ b/arch/x86/Kconfig
12627@@ -126,7 +126,7 @@ config X86
12628 select RTC_LIB
12629 select HAVE_DEBUG_STACKOVERFLOW
12630 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12631- select HAVE_CC_STACKPROTECTOR
12632+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12633
12634 config INSTRUCTION_DECODER
12635 def_bool y
12636@@ -251,7 +251,7 @@ config X86_HT
12637
12638 config X86_32_LAZY_GS
12639 def_bool y
12640- depends on X86_32 && !CC_STACKPROTECTOR
12641+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12642
12643 config ARCH_HWEIGHT_CFLAGS
12644 string
12645@@ -589,6 +589,7 @@ config SCHED_OMIT_FRAME_POINTER
12646
12647 menuconfig HYPERVISOR_GUEST
12648 bool "Linux guest support"
12649+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12650 ---help---
12651 Say Y here to enable options for running Linux under various hyper-
12652 visors. This option enables basic hypervisor detection and platform
12653@@ -1111,7 +1112,7 @@ choice
12654
12655 config NOHIGHMEM
12656 bool "off"
12657- depends on !X86_NUMAQ
12658+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12659 ---help---
12660 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12661 However, the address space of 32-bit x86 processors is only 4
12662@@ -1148,7 +1149,7 @@ config NOHIGHMEM
12663
12664 config HIGHMEM4G
12665 bool "4GB"
12666- depends on !X86_NUMAQ
12667+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12668 ---help---
12669 Select this if you have a 32-bit processor and between 1 and 4
12670 gigabytes of physical RAM.
12671@@ -1201,7 +1202,7 @@ config PAGE_OFFSET
12672 hex
12673 default 0xB0000000 if VMSPLIT_3G_OPT
12674 default 0x80000000 if VMSPLIT_2G
12675- default 0x78000000 if VMSPLIT_2G_OPT
12676+ default 0x70000000 if VMSPLIT_2G_OPT
12677 default 0x40000000 if VMSPLIT_1G
12678 default 0xC0000000
12679 depends on X86_32
12680@@ -1605,6 +1606,7 @@ source kernel/Kconfig.hz
12681
12682 config KEXEC
12683 bool "kexec system call"
12684+ depends on !GRKERNSEC_KMEM
12685 	  kexec is a system call that implements the ability to shut down your
12686 kexec is a system call that implements the ability to shutdown your
12687 current kernel, and to start another kernel. It is like a reboot
12688@@ -1756,7 +1758,9 @@ config X86_NEED_RELOCS
12689
12690 config PHYSICAL_ALIGN
12691 hex "Alignment value to which kernel should be aligned"
12692- default "0x200000"
12693+ default "0x1000000"
12694+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12695+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12696 range 0x2000 0x1000000 if X86_32
12697 range 0x200000 0x1000000 if X86_64
12698 ---help---
12699@@ -1836,9 +1840,10 @@ config DEBUG_HOTPLUG_CPU0
12700 If unsure, say N.
12701
12702 config COMPAT_VDSO
12703- def_bool y
12704+ def_bool n
12705 prompt "Compat VDSO support"
12706 depends on X86_32 || IA32_EMULATION
12707+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12708 ---help---
12709 Map the 32-bit VDSO to the predictable old-style address too.
12710
12711diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12712index f3aaf23..a1d3c49 100644
12713--- a/arch/x86/Kconfig.cpu
12714+++ b/arch/x86/Kconfig.cpu
12715@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12716
12717 config X86_F00F_BUG
12718 def_bool y
12719- depends on M586MMX || M586TSC || M586 || M486
12720+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12721
12722 config X86_INVD_BUG
12723 def_bool y
12724@@ -327,7 +327,7 @@ config X86_INVD_BUG
12725
12726 config X86_ALIGNMENT_16
12727 def_bool y
12728- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12729+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12730
12731 config X86_INTEL_USERCOPY
12732 def_bool y
12733@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12734 # generates cmov.
12735 config X86_CMOV
12736 def_bool y
12737- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12738+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12739
12740 config X86_MINIMUM_CPU_FAMILY
12741 int
12742diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12743index 321a52c..3d51a5e 100644
12744--- a/arch/x86/Kconfig.debug
12745+++ b/arch/x86/Kconfig.debug
12746@@ -84,7 +84,7 @@ config X86_PTDUMP
12747 config DEBUG_RODATA
12748 bool "Write protect kernel read-only data structures"
12749 default y
12750- depends on DEBUG_KERNEL
12751+ depends on DEBUG_KERNEL && BROKEN
12752 ---help---
12753 Mark the kernel read-only data as write-protected in the pagetables,
12754 in order to catch accidental (and incorrect) writes to such const
12755@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
12756
12757 config DEBUG_SET_MODULE_RONX
12758 bool "Set loadable kernel module data as NX and text as RO"
12759- depends on MODULES
12760+ depends on MODULES && BROKEN
12761 ---help---
12762 This option helps catch unintended modifications to loadable
12763 kernel module's text and read-only data. It also prevents execution
12764diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12765index 0dd99ea..4a63d82 100644
12766--- a/arch/x86/Makefile
12767+++ b/arch/x86/Makefile
12768@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y)
12769 # CPU-specific tuning. Anything which can be shared with UML should go here.
12770 include $(srctree)/arch/x86/Makefile_32.cpu
12771 KBUILD_CFLAGS += $(cflags-y)
12772-
12773- # temporary until string.h is fixed
12774- KBUILD_CFLAGS += -ffreestanding
12775 else
12776 BITS := 64
12777 UTS_MACHINE := x86_64
12778@@ -112,6 +109,9 @@ else
12779 KBUILD_CFLAGS += -maccumulate-outgoing-args
12780 endif
12781
12782+# temporary until string.h is fixed
12783+KBUILD_CFLAGS += -ffreestanding
12784+
12785 # Make sure compiler does not have buggy stack-protector support.
12786 ifdef CONFIG_CC_STACKPROTECTOR
12787 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12788@@ -269,3 +269,12 @@ define archhelp
12789 echo ' FDINITRD=file initrd for the booted kernel'
12790 echo ' kvmconfig - Enable additional options for guest kernel support'
12791 endef
12792+
12793+define OLD_LD
12794+
12795+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12796+*** Please upgrade your binutils to 2.18 or newer
12797+endef
12798+
12799+archprepare:
12800+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12801diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12802index 878df7e..a803913 100644
12803--- a/arch/x86/boot/Makefile
12804+++ b/arch/x86/boot/Makefile
12805@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
12806 # ---------------------------------------------------------------------------
12807
12808 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12809+ifdef CONSTIFY_PLUGIN
12810+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12811+endif
12812 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12813 GCOV_PROFILE := n
12814
12815diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12816index 878e4b9..20537ab 100644
12817--- a/arch/x86/boot/bitops.h
12818+++ b/arch/x86/boot/bitops.h
12819@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12820 u8 v;
12821 const u32 *p = (const u32 *)addr;
12822
12823- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12824+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12825 return v;
12826 }
12827
12828@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12829
12830 static inline void set_bit(int nr, void *addr)
12831 {
12832- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12833+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12834 }
12835
12836 #endif /* BOOT_BITOPS_H */
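
This asm-to-asm-volatile conversion repeats in boot.h and cpucheck.c below. GCC is free to CSE, move, or delete a non-volatile asm whose outputs look unused, which is unsafe for statements with side effects (MSR writes, segment reads, bit-test-and-set in the boot environment). A compact illustration under that assumption (rdmsr_sketch is hypothetical):

	static inline u64 rdmsr_sketch(u32 msr)
	{
		u32 lo, hi;

		/* Without volatile the compiler could drop this if it decided
		 * lo/hi were dead; the rdmsr must execute regardless. */
		asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
		return ((u64)hi << 32) | lo;
	}
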
12837diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12838index 50f8c5e..4f84fff 100644
12839--- a/arch/x86/boot/boot.h
12840+++ b/arch/x86/boot/boot.h
12841@@ -84,7 +84,7 @@ static inline void io_delay(void)
12842 static inline u16 ds(void)
12843 {
12844 u16 seg;
12845- asm("movw %%ds,%0" : "=rm" (seg));
12846+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12847 return seg;
12848 }
12849
12850@@ -180,7 +180,7 @@ static inline void wrgs32(u32 v, addr_t addr)
12851 static inline int memcmp(const void *s1, const void *s2, size_t len)
12852 {
12853 u8 diff;
12854- asm("repe; cmpsb; setnz %0"
12855+ asm volatile("repe; cmpsb; setnz %0"
12856 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
12857 return diff;
12858 }
12859diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12860index 0fcd913..3bb5c42 100644
12861--- a/arch/x86/boot/compressed/Makefile
12862+++ b/arch/x86/boot/compressed/Makefile
12863@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12864 KBUILD_CFLAGS += -mno-mmx -mno-sse
12865 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12866 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12867+ifdef CONSTIFY_PLUGIN
12868+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12869+endif
12870
12871 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12872 GCOV_PROFILE := n
12873diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12874index a53440e..c3dbf1e 100644
12875--- a/arch/x86/boot/compressed/efi_stub_32.S
12876+++ b/arch/x86/boot/compressed/efi_stub_32.S
12877@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12878 * parameter 2, ..., param n. To make things easy, we save the return
12879 * address of efi_call_phys in a global variable.
12880 */
12881- popl %ecx
12882- movl %ecx, saved_return_addr(%edx)
12883- /* get the function pointer into ECX*/
12884- popl %ecx
12885- movl %ecx, efi_rt_function_ptr(%edx)
12886+ popl saved_return_addr(%edx)
12887+ popl efi_rt_function_ptr(%edx)
12888
12889 /*
12890 * 3. Call the physical function.
12891 */
12892- call *%ecx
12893+ call *efi_rt_function_ptr(%edx)
12894
12895 /*
12896 	 * 4. Balance the stack. And because EAX contains the return value,
12897@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12898 1: popl %edx
12899 subl $1b, %edx
12900
12901- movl efi_rt_function_ptr(%edx), %ecx
12902- pushl %ecx
12903+ pushl efi_rt_function_ptr(%edx)
12904
12905 /*
12906 * 10. Push the saved return address onto the stack and return.
12907 */
12908- movl saved_return_addr(%edx), %ecx
12909- pushl %ecx
12910- ret
12911+ jmpl *saved_return_addr(%edx)
12912 ENDPROC(efi_call_phys)
12913 .previous
12914
12915diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12916index f45ab7a..ebc015f 100644
12917--- a/arch/x86/boot/compressed/head_32.S
12918+++ b/arch/x86/boot/compressed/head_32.S
12919@@ -119,10 +119,10 @@ preferred_addr:
12920 addl %eax, %ebx
12921 notl %eax
12922 andl %eax, %ebx
12923- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12924+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12925 jge 1f
12926 #endif
12927- movl $LOAD_PHYSICAL_ADDR, %ebx
12928+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12929 1:
12930
12931 /* Target address to relocate to for decompression */
12932diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12933index b10fa66..5ee0472 100644
12934--- a/arch/x86/boot/compressed/head_64.S
12935+++ b/arch/x86/boot/compressed/head_64.S
12936@@ -94,10 +94,10 @@ ENTRY(startup_32)
12937 addl %eax, %ebx
12938 notl %eax
12939 andl %eax, %ebx
12940- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12941+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12942 jge 1f
12943 #endif
12944- movl $LOAD_PHYSICAL_ADDR, %ebx
12945+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12946 1:
12947
12948 /* Target address to relocate to for decompression */
12949@@ -268,10 +268,10 @@ preferred_addr:
12950 addq %rax, %rbp
12951 notq %rax
12952 andq %rax, %rbp
12953- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12954+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12955 jge 1f
12956 #endif
12957- movq $LOAD_PHYSICAL_ADDR, %rbp
12958+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12959 1:
12960
12961 /* Target address to relocate to for decompression */
12962@@ -363,8 +363,8 @@ gdt:
12963 .long gdt
12964 .word 0
12965 .quad 0x0000000000000000 /* NULL descriptor */
12966- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12967- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12968+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12969+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12970 .quad 0x0080890000000000 /* TS descriptor */
12971 .quad 0x0000000000000000 /* TS continued */
12972 gdt_end:
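
The GDT edit above changes the access bytes 0x9a/0x92 to 0x9b/0x93, i.e. it pre-sets each descriptor's accessed bit (bit 40 of the quad). With the bit already set the CPU never has to write the descriptor back on a segment load, which matters once KERNEXEC can keep the GDT on read-only pages. Decoded in C for reference (gdt_accessed is a hypothetical helper):

	#include <stdint.h>

	#define GDT_ACCESSED	(1ULL << 40)	/* bit 0 of the access byte */

	static inline int gdt_accessed(uint64_t desc)
	{
		return !!(desc & GDT_ACCESSED);
	}
	/* gdt_accessed(0x00af9a000000ffffULL) == 0
	 * gdt_accessed(0x00af9b000000ffffULL) == 1 */
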
12973diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12974index 196eaf3..c96716d 100644
12975--- a/arch/x86/boot/compressed/misc.c
12976+++ b/arch/x86/boot/compressed/misc.c
12977@@ -218,7 +218,7 @@ void __putstr(const char *s)
12978
12979 void *memset(void *s, int c, size_t n)
12980 {
12981- int i;
12982+ size_t i;
12983 char *ss = s;
12984
12985 for (i = 0; i < n; i++)
12986@@ -277,7 +277,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12987 * Calculate the delta between where vmlinux was linked to load
12988 * and where it was actually loaded.
12989 */
12990- delta = min_addr - LOAD_PHYSICAL_ADDR;
12991+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12992 if (!delta) {
12993 debug_putstr("No relocation needed... ");
12994 return;
12995@@ -347,7 +347,7 @@ static void parse_elf(void *output)
12996 Elf32_Ehdr ehdr;
12997 Elf32_Phdr *phdrs, *phdr;
12998 #endif
12999- void *dest;
13000+ void *dest, *prev;
13001 int i;
13002
13003 memcpy(&ehdr, output, sizeof(ehdr));
13004@@ -374,13 +374,16 @@ static void parse_elf(void *output)
13005 case PT_LOAD:
13006 #ifdef CONFIG_RELOCATABLE
13007 dest = output;
13008- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13009+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13010 #else
13011 dest = (void *)(phdr->p_paddr);
13012 #endif
13013 memcpy(dest,
13014 output + phdr->p_offset,
13015 phdr->p_filesz);
13016+ if (i)
13017+ memset(prev, 0xff, dest - prev);
13018+ prev = dest + phdr->p_filesz;
13019 break;
13020 default: /* Ignore other PT_* */ break;
13021 }
13022@@ -430,7 +433,7 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap,
13023 error("Destination address too large");
13024 #endif
13025 #ifndef CONFIG_RELOCATABLE
13026- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13027+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13028 error("Wrong destination address");
13029 #endif
13030
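
parse_elf() now poisons the slack between consecutive PT_LOAD segments with 0xff bytes instead of leaving stale decompressor data in place. One subtlety: the `if (i)` guard assumes the first program header is a PT_LOAD, otherwise prev is read uninitialized; a defensive variant would track the previous segment end explicitly, e.g. (a sketch using the upstream LOAD_PHYSICAL_ADDR name, not the patch's code):

	char *dest, *prev = NULL;
	int i;

	for (i = 0; i < ehdr.e_phnum; i++) {
		Elf64_Phdr *phdr = &phdrs[i];

		if (phdr->p_type != PT_LOAD)
			continue;
		dest = output + (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
		memcpy(dest, output + phdr->p_offset, phdr->p_filesz);
		if (prev)	/* poison the gap up to this segment */
			memset(prev, 0xff, dest - prev);
		prev = dest + phdr->p_filesz;
	}
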
13031diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13032index 100a9a1..bb3bdb0 100644
13033--- a/arch/x86/boot/cpucheck.c
13034+++ b/arch/x86/boot/cpucheck.c
13035@@ -117,9 +117,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13036 u32 ecx = MSR_K7_HWCR;
13037 u32 eax, edx;
13038
13039- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13040+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13041 eax &= ~(1 << 15);
13042- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13043+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13044
13045 get_cpuflags(); /* Make sure it really did something */
13046 err = check_cpuflags();
13047@@ -132,9 +132,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13048 u32 ecx = MSR_VIA_FCR;
13049 u32 eax, edx;
13050
13051- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13052+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13053 eax |= (1<<1)|(1<<7);
13054- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13055+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13056
13057 set_bit(X86_FEATURE_CX8, cpu.flags);
13058 err = check_cpuflags();
13059@@ -145,12 +145,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13060 u32 eax, edx;
13061 u32 level = 1;
13062
13063- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13064- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13065- asm("cpuid"
13066+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13067+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13068+ asm volatile("cpuid"
13069 : "+a" (level), "=d" (cpu.flags[0])
13070 : : "ecx", "ebx");
13071- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13072+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13073
13074 err = check_cpuflags();
13075 }
13076diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13077index ec3b8ba..6a0db1f 100644
13078--- a/arch/x86/boot/header.S
13079+++ b/arch/x86/boot/header.S
13080@@ -416,10 +416,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13081 # single linked list of
13082 # struct setup_data
13083
13084-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13085+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13086
13087 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13088+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13089+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13090+#else
13091 #define VO_INIT_SIZE (VO__end - VO__text)
13092+#endif
13093 #if ZO_INIT_SIZE > VO_INIT_SIZE
13094 #define INIT_SIZE ZO_INIT_SIZE
13095 #else
13096diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13097index db75d07..8e6d0af 100644
13098--- a/arch/x86/boot/memory.c
13099+++ b/arch/x86/boot/memory.c
13100@@ -19,7 +19,7 @@
13101
13102 static int detect_memory_e820(void)
13103 {
13104- int count = 0;
13105+ unsigned int count = 0;
13106 struct biosregs ireg, oreg;
13107 struct e820entry *desc = boot_params.e820_map;
13108 static struct e820entry buf; /* static so it is zeroed */
13109diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13110index 11e8c6e..fdbb1ed 100644
13111--- a/arch/x86/boot/video-vesa.c
13112+++ b/arch/x86/boot/video-vesa.c
13113@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
13114
13115 boot_params.screen_info.vesapm_seg = oreg.es;
13116 boot_params.screen_info.vesapm_off = oreg.di;
13117+ boot_params.screen_info.vesapm_size = oreg.cx;
13118 }
13119
13120 /*
13121diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13122index 43eda28..5ab5fdb 100644
13123--- a/arch/x86/boot/video.c
13124+++ b/arch/x86/boot/video.c
13125@@ -96,7 +96,7 @@ static void store_mode_params(void)
13126 static unsigned int get_entry(void)
13127 {
13128 char entry_buf[4];
13129- int i, len = 0;
13130+ unsigned int i, len = 0;
13131 int key;
13132 unsigned int v;
13133
13134diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13135index 9105655..41779c1 100644
13136--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13137+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13138@@ -8,6 +8,8 @@
13139 * including this sentence is retained in full.
13140 */
13141
13142+#include <asm/alternative-asm.h>
13143+
13144 .extern crypto_ft_tab
13145 .extern crypto_it_tab
13146 .extern crypto_fl_tab
13147@@ -70,6 +72,8 @@
13148 je B192; \
13149 leaq 32(r9),r9;
13150
13151+#define ret pax_force_retaddr; ret
13152+
13153 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13154 movq r1,r2; \
13155 movq r3,r4; \
13156diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13157index 477e9d7..c92c7d8 100644
13158--- a/arch/x86/crypto/aesni-intel_asm.S
13159+++ b/arch/x86/crypto/aesni-intel_asm.S
13160@@ -31,6 +31,7 @@
13161
13162 #include <linux/linkage.h>
13163 #include <asm/inst.h>
13164+#include <asm/alternative-asm.h>
13165
13166 #ifdef __x86_64__
13167 .data
13168@@ -205,7 +206,7 @@ enc: .octa 0x2
13169 * num_initial_blocks = b mod 4
13170 * encrypt the initial num_initial_blocks blocks and apply ghash on
13171 * the ciphertext
13172-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13173+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13174 * are clobbered
13175 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13176 */
13177@@ -214,8 +215,8 @@ enc: .octa 0x2
13178 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13179 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13180 mov arg7, %r10 # %r10 = AAD
13181- mov arg8, %r12 # %r12 = aadLen
13182- mov %r12, %r11
13183+ mov arg8, %r15 # %r15 = aadLen
13184+ mov %r15, %r11
13185 pxor %xmm\i, %xmm\i
13186 _get_AAD_loop\num_initial_blocks\operation:
13187 movd (%r10), \TMP1
13188@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13189 psrldq $4, %xmm\i
13190 pxor \TMP1, %xmm\i
13191 add $4, %r10
13192- sub $4, %r12
13193+ sub $4, %r15
13194 jne _get_AAD_loop\num_initial_blocks\operation
13195 cmp $16, %r11
13196 je _get_AAD_loop2_done\num_initial_blocks\operation
13197- mov $16, %r12
13198+ mov $16, %r15
13199 _get_AAD_loop2\num_initial_blocks\operation:
13200 psrldq $4, %xmm\i
13201- sub $4, %r12
13202- cmp %r11, %r12
13203+ sub $4, %r15
13204+ cmp %r11, %r15
13205 jne _get_AAD_loop2\num_initial_blocks\operation
13206 _get_AAD_loop2_done\num_initial_blocks\operation:
13207 movdqa SHUF_MASK(%rip), %xmm14
13208@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13209 * num_initial_blocks = b mod 4
13210 * encrypt the initial num_initial_blocks blocks and apply ghash on
13211 * the ciphertext
13212-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13213+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13214 * are clobbered
13215 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13216 */
13217@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13218 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13219 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13220 mov arg7, %r10 # %r10 = AAD
13221- mov arg8, %r12 # %r12 = aadLen
13222- mov %r12, %r11
13223+ mov arg8, %r15 # %r15 = aadLen
13224+ mov %r15, %r11
13225 pxor %xmm\i, %xmm\i
13226 _get_AAD_loop\num_initial_blocks\operation:
13227 movd (%r10), \TMP1
13228@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13229 psrldq $4, %xmm\i
13230 pxor \TMP1, %xmm\i
13231 add $4, %r10
13232- sub $4, %r12
13233+ sub $4, %r15
13234 jne _get_AAD_loop\num_initial_blocks\operation
13235 cmp $16, %r11
13236 je _get_AAD_loop2_done\num_initial_blocks\operation
13237- mov $16, %r12
13238+ mov $16, %r15
13239 _get_AAD_loop2\num_initial_blocks\operation:
13240 psrldq $4, %xmm\i
13241- sub $4, %r12
13242- cmp %r11, %r12
13243+ sub $4, %r15
13244+ cmp %r11, %r15
13245 jne _get_AAD_loop2\num_initial_blocks\operation
13246 _get_AAD_loop2_done\num_initial_blocks\operation:
13247 movdqa SHUF_MASK(%rip), %xmm14
13248@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13249 *
13250 *****************************************************************************/
13251 ENTRY(aesni_gcm_dec)
13252- push %r12
13253+ push %r15
13254 push %r13
13255 push %r14
13256 mov %rsp, %r14
13257@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13258 */
13259 sub $VARIABLE_OFFSET, %rsp
13260 and $~63, %rsp # align rsp to 64 bytes
13261- mov %arg6, %r12
13262- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13263+ mov %arg6, %r15
13264+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13265 movdqa SHUF_MASK(%rip), %xmm2
13266 PSHUFB_XMM %xmm2, %xmm13
13267
13268@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13269 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13270 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13271 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13272- mov %r13, %r12
13273- and $(3<<4), %r12
13274+ mov %r13, %r15
13275+ and $(3<<4), %r15
13276 jz _initial_num_blocks_is_0_decrypt
13277- cmp $(2<<4), %r12
13278+ cmp $(2<<4), %r15
13279 jb _initial_num_blocks_is_1_decrypt
13280 je _initial_num_blocks_is_2_decrypt
13281 _initial_num_blocks_is_3_decrypt:
13282@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13283 sub $16, %r11
13284 add %r13, %r11
13285 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13286- lea SHIFT_MASK+16(%rip), %r12
13287- sub %r13, %r12
13288+ lea SHIFT_MASK+16(%rip), %r15
13289+ sub %r13, %r15
13290 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13291 # (%r13 is the number of bytes in plaintext mod 16)
13292- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13293+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13294 	PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
13295
13296 movdqa %xmm1, %xmm2
13297 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13298- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13299+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13300 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13301 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13302 pand %xmm1, %xmm2
13303@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13304 sub $1, %r13
13305 jne _less_than_8_bytes_left_decrypt
13306 _multiple_of_16_bytes_decrypt:
13307- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13308- shl $3, %r12 # convert into number of bits
13309- movd %r12d, %xmm15 # len(A) in %xmm15
13310+	mov	arg8, %r15		  # %r15 = aadLen (number of bytes)
13311+ shl $3, %r15 # convert into number of bits
13312+ movd %r15d, %xmm15 # len(A) in %xmm15
13313 shl $3, %arg4 # len(C) in bits (*128)
13314 MOVQ_R64_XMM %arg4, %xmm1
13315 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13316@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13317 mov %r14, %rsp
13318 pop %r14
13319 pop %r13
13320- pop %r12
13321+ pop %r15
13322+ pax_force_retaddr
13323 ret
13324 ENDPROC(aesni_gcm_dec)
13325
13326@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13327 * poly = x^128 + x^127 + x^126 + x^121 + 1
13328 ***************************************************************************/
13329 ENTRY(aesni_gcm_enc)
13330- push %r12
13331+ push %r15
13332 push %r13
13333 push %r14
13334 mov %rsp, %r14
13335@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13336 #
13337 sub $VARIABLE_OFFSET, %rsp
13338 and $~63, %rsp
13339- mov %arg6, %r12
13340- movdqu (%r12), %xmm13
13341+ mov %arg6, %r15
13342+ movdqu (%r15), %xmm13
13343 movdqa SHUF_MASK(%rip), %xmm2
13344 PSHUFB_XMM %xmm2, %xmm13
13345
13346@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13347 movdqa %xmm13, HashKey(%rsp)
13348 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13349 and $-16, %r13
13350- mov %r13, %r12
13351+ mov %r13, %r15
13352
13353 # Encrypt first few blocks
13354
13355- and $(3<<4), %r12
13356+ and $(3<<4), %r15
13357 jz _initial_num_blocks_is_0_encrypt
13358- cmp $(2<<4), %r12
13359+ cmp $(2<<4), %r15
13360 jb _initial_num_blocks_is_1_encrypt
13361 je _initial_num_blocks_is_2_encrypt
13362 _initial_num_blocks_is_3_encrypt:
13363@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13364 sub $16, %r11
13365 add %r13, %r11
13366 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13367- lea SHIFT_MASK+16(%rip), %r12
13368- sub %r13, %r12
13369+ lea SHIFT_MASK+16(%rip), %r15
13370+ sub %r13, %r15
13371 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13372 # (%r13 is the number of bytes in plaintext mod 16)
13373- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13374+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13375 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13376 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13377- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13378+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13379 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13380 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13381 movdqa SHUF_MASK(%rip), %xmm10
13382@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13383 sub $1, %r13
13384 jne _less_than_8_bytes_left_encrypt
13385 _multiple_of_16_bytes_encrypt:
13386- mov arg8, %r12 # %r12 = addLen (number of bytes)
13387- shl $3, %r12
13388- movd %r12d, %xmm15 # len(A) in %xmm15
13389+	mov	arg8, %r15	       # %r15 = aadLen (number of bytes)
13390+ shl $3, %r15
13391+ movd %r15d, %xmm15 # len(A) in %xmm15
13392 shl $3, %arg4 # len(C) in bits (*128)
13393 MOVQ_R64_XMM %arg4, %xmm1
13394 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13395@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13396 mov %r14, %rsp
13397 pop %r14
13398 pop %r13
13399- pop %r12
13400+ pop %r15
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(aesni_gcm_enc)
13404
13405@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13406 pxor %xmm1, %xmm0
13407 movaps %xmm0, (TKEYP)
13408 add $0x10, TKEYP
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(_key_expansion_128)
13412 ENDPROC(_key_expansion_256a)
13413@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13414 shufps $0b01001110, %xmm2, %xmm1
13415 movaps %xmm1, 0x10(TKEYP)
13416 add $0x20, TKEYP
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(_key_expansion_192a)
13420
13421@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13422
13423 movaps %xmm0, (TKEYP)
13424 add $0x10, TKEYP
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_key_expansion_192b)
13428
13429@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13430 pxor %xmm1, %xmm2
13431 movaps %xmm2, (TKEYP)
13432 add $0x10, TKEYP
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_key_expansion_256b)
13436
13437@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13438 #ifndef __x86_64__
13439 popl KEYP
13440 #endif
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_set_key)
13444
13445@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13446 popl KLEN
13447 popl KEYP
13448 #endif
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(aesni_enc)
13452
13453@@ -1974,6 +1983,7 @@ _aesni_enc1:
13454 AESENC KEY STATE
13455 movaps 0x70(TKEYP), KEY
13456 AESENCLAST KEY STATE
13457+ pax_force_retaddr
13458 ret
13459 ENDPROC(_aesni_enc1)
13460
13461@@ -2083,6 +2093,7 @@ _aesni_enc4:
13462 AESENCLAST KEY STATE2
13463 AESENCLAST KEY STATE3
13464 AESENCLAST KEY STATE4
13465+ pax_force_retaddr
13466 ret
13467 ENDPROC(_aesni_enc4)
13468
13469@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13470 popl KLEN
13471 popl KEYP
13472 #endif
13473+ pax_force_retaddr
13474 ret
13475 ENDPROC(aesni_dec)
13476
13477@@ -2164,6 +2176,7 @@ _aesni_dec1:
13478 AESDEC KEY STATE
13479 movaps 0x70(TKEYP), KEY
13480 AESDECLAST KEY STATE
13481+ pax_force_retaddr
13482 ret
13483 ENDPROC(_aesni_dec1)
13484
13485@@ -2273,6 +2286,7 @@ _aesni_dec4:
13486 AESDECLAST KEY STATE2
13487 AESDECLAST KEY STATE3
13488 AESDECLAST KEY STATE4
13489+ pax_force_retaddr
13490 ret
13491 ENDPROC(_aesni_dec4)
13492
13493@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13494 popl KEYP
13495 popl LEN
13496 #endif
13497+ pax_force_retaddr
13498 ret
13499 ENDPROC(aesni_ecb_enc)
13500
13501@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13502 popl KEYP
13503 popl LEN
13504 #endif
13505+ pax_force_retaddr
13506 ret
13507 ENDPROC(aesni_ecb_dec)
13508
13509@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13510 popl LEN
13511 popl IVP
13512 #endif
13513+ pax_force_retaddr
13514 ret
13515 ENDPROC(aesni_cbc_enc)
13516
13517@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13518 popl LEN
13519 popl IVP
13520 #endif
13521+ pax_force_retaddr
13522 ret
13523 ENDPROC(aesni_cbc_dec)
13524
13525@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13526 mov $1, TCTR_LOW
13527 MOVQ_R64_XMM TCTR_LOW INC
13528 MOVQ_R64_XMM CTR TCTR_LOW
13529+ pax_force_retaddr
13530 ret
13531 ENDPROC(_aesni_inc_init)
13532
13533@@ -2579,6 +2598,7 @@ _aesni_inc:
13534 .Linc_low:
13535 movaps CTR, IV
13536 PSHUFB_XMM BSWAP_MASK IV
13537+ pax_force_retaddr
13538 ret
13539 ENDPROC(_aesni_inc)
13540
13541@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13542 .Lctr_enc_ret:
13543 movups IV, (IVP)
13544 .Lctr_enc_just_ret:
13545+ pax_force_retaddr
13546 ret
13547 ENDPROC(aesni_ctr_enc)
13548
13549@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13550 pxor INC, STATE4
13551 movdqu STATE4, 0x70(OUTP)
13552
13553+ pax_force_retaddr
13554 ret
13555 ENDPROC(aesni_xts_crypt8)
13556
13557diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13558index 246c670..466e2d6 100644
13559--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13560+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13561@@ -21,6 +21,7 @@
13562 */
13563
13564 #include <linux/linkage.h>
13565+#include <asm/alternative-asm.h>
13566
13567 .file "blowfish-x86_64-asm.S"
13568 .text
13569@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13570 jnz .L__enc_xor;
13571
13572 write_block();
13573+ pax_force_retaddr
13574 ret;
13575 .L__enc_xor:
13576 xor_block();
13577+ pax_force_retaddr
13578 ret;
13579 ENDPROC(__blowfish_enc_blk)
13580
13581@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13582
13583 movq %r11, %rbp;
13584
13585+ pax_force_retaddr
13586 ret;
13587 ENDPROC(blowfish_dec_blk)
13588
13589@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13590
13591 popq %rbx;
13592 popq %rbp;
13593+ pax_force_retaddr
13594 ret;
13595
13596 .L__enc_xor4:
13597@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13598
13599 popq %rbx;
13600 popq %rbp;
13601+ pax_force_retaddr
13602 ret;
13603 ENDPROC(__blowfish_enc_blk_4way)
13604
13605@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13606 popq %rbx;
13607 popq %rbp;
13608
13609+ pax_force_retaddr
13610 ret;
13611 ENDPROC(blowfish_dec_blk_4way)
13612diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13613index ce71f92..1dce7ec 100644
13614--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13615+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13616@@ -16,6 +16,7 @@
13617 */
13618
13619 #include <linux/linkage.h>
13620+#include <asm/alternative-asm.h>
13621
13622 #define CAMELLIA_TABLE_BYTE_LEN 272
13623
13624@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13625 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13626 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13627 %rcx, (%r9));
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13631
13632@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13633 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13634 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13635 %rax, (%r9));
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13639
13640@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13641 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13642 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13643
13644+ pax_force_retaddr
13645 ret;
13646
13647 .align 8
13648@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13649 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13650 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13651
13652+ pax_force_retaddr
13653 ret;
13654
13655 .align 8
13656@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13657 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13658 %xmm8, %rsi);
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_ecb_enc_16way)
13663
13664@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13665 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13666 %xmm8, %rsi);
13667
13668+ pax_force_retaddr
13669 ret;
13670 ENDPROC(camellia_ecb_dec_16way)
13671
13672@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13673 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13674 %xmm8, %rsi);
13675
13676+ pax_force_retaddr
13677 ret;
13678 ENDPROC(camellia_cbc_dec_16way)
13679
13680@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13681 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13682 %xmm8, %rsi);
13683
13684+ pax_force_retaddr
13685 ret;
13686 ENDPROC(camellia_ctr_16way)
13687
13688@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13689 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13690 %xmm8, %rsi);
13691
13692+ pax_force_retaddr
13693 ret;
13694 ENDPROC(camellia_xts_crypt_16way)
13695
13696diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13697index 0e0b886..5a3123c 100644
13698--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13699+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13700@@ -11,6 +11,7 @@
13701 */
13702
13703 #include <linux/linkage.h>
13704+#include <asm/alternative-asm.h>
13705
13706 #define CAMELLIA_TABLE_BYTE_LEN 272
13707
13708@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13709 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13710 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13711 %rcx, (%r9));
13712+ pax_force_retaddr
13713 ret;
13714 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13715
13716@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13717 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13718 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13719 %rax, (%r9));
13720+ pax_force_retaddr
13721 ret;
13722 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13723
13724@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13725 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13726 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13727
13728+ pax_force_retaddr
13729 ret;
13730
13731 .align 8
13732@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13733 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13734 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13735
13736+ pax_force_retaddr
13737 ret;
13738
13739 .align 8
13740@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13741
13742 vzeroupper;
13743
13744+ pax_force_retaddr
13745 ret;
13746 ENDPROC(camellia_ecb_enc_32way)
13747
13748@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13749
13750 vzeroupper;
13751
13752+ pax_force_retaddr
13753 ret;
13754 ENDPROC(camellia_ecb_dec_32way)
13755
13756@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13757
13758 vzeroupper;
13759
13760+ pax_force_retaddr
13761 ret;
13762 ENDPROC(camellia_cbc_dec_32way)
13763
13764@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13765
13766 vzeroupper;
13767
13768+ pax_force_retaddr
13769 ret;
13770 ENDPROC(camellia_ctr_32way)
13771
13772@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13773
13774 vzeroupper;
13775
13776+ pax_force_retaddr
13777 ret;
13778 ENDPROC(camellia_xts_crypt_32way)
13779
13780diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13781index 310319c..db3d7b5 100644
13782--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13783+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13784@@ -21,6 +21,7 @@
13785 */
13786
13787 #include <linux/linkage.h>
13788+#include <asm/alternative-asm.h>
13789
13790 .file "camellia-x86_64-asm_64.S"
13791 .text
13792@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13793 enc_outunpack(mov, RT1);
13794
13795 movq RRBP, %rbp;
13796+ pax_force_retaddr
13797 ret;
13798
13799 .L__enc_xor:
13800 enc_outunpack(xor, RT1);
13801
13802 movq RRBP, %rbp;
13803+ pax_force_retaddr
13804 ret;
13805 ENDPROC(__camellia_enc_blk)
13806
13807@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13808 dec_outunpack();
13809
13810 movq RRBP, %rbp;
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(camellia_dec_blk)
13814
13815@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13816
13817 movq RRBP, %rbp;
13818 popq %rbx;
13819+ pax_force_retaddr
13820 ret;
13821
13822 .L__enc2_xor:
13823@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13824
13825 movq RRBP, %rbp;
13826 popq %rbx;
13827+ pax_force_retaddr
13828 ret;
13829 ENDPROC(__camellia_enc_blk_2way)
13830
13831@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13832
13833 movq RRBP, %rbp;
13834 movq RXOR, %rbx;
13835+ pax_force_retaddr
13836 ret;
13837 ENDPROC(camellia_dec_blk_2way)
13838diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13839index c35fd5d..2d8c7db 100644
13840--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13841+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13842@@ -24,6 +24,7 @@
13843 */
13844
13845 #include <linux/linkage.h>
13846+#include <asm/alternative-asm.h>
13847
13848 .file "cast5-avx-x86_64-asm_64.S"
13849
13850@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13851 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13852 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13853
13854+ pax_force_retaddr
13855 ret;
13856 ENDPROC(__cast5_enc_blk16)
13857
13858@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13859 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13860 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13861
13862+ pax_force_retaddr
13863 ret;
13864
13865 .L__skip_dec:
13866@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13867 vmovdqu RR4, (6*4*4)(%r11);
13868 vmovdqu RL4, (7*4*4)(%r11);
13869
13870+ pax_force_retaddr
13871 ret;
13872 ENDPROC(cast5_ecb_enc_16way)
13873
13874@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13875 vmovdqu RR4, (6*4*4)(%r11);
13876 vmovdqu RL4, (7*4*4)(%r11);
13877
13878+ pax_force_retaddr
13879 ret;
13880 ENDPROC(cast5_ecb_dec_16way)
13881
13882@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13883 * %rdx: src
13884 */
13885
13886- pushq %r12;
13887+ pushq %r14;
13888
13889 movq %rsi, %r11;
13890- movq %rdx, %r12;
13891+ movq %rdx, %r14;
13892
13893 vmovdqu (0*16)(%rdx), RL1;
13894 vmovdqu (1*16)(%rdx), RR1;
13895@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13896 call __cast5_dec_blk16;
13897
13898 /* xor with src */
13899- vmovq (%r12), RX;
13900+ vmovq (%r14), RX;
13901 vpshufd $0x4f, RX, RX;
13902 vpxor RX, RR1, RR1;
13903- vpxor 0*16+8(%r12), RL1, RL1;
13904- vpxor 1*16+8(%r12), RR2, RR2;
13905- vpxor 2*16+8(%r12), RL2, RL2;
13906- vpxor 3*16+8(%r12), RR3, RR3;
13907- vpxor 4*16+8(%r12), RL3, RL3;
13908- vpxor 5*16+8(%r12), RR4, RR4;
13909- vpxor 6*16+8(%r12), RL4, RL4;
13910+ vpxor 0*16+8(%r14), RL1, RL1;
13911+ vpxor 1*16+8(%r14), RR2, RR2;
13912+ vpxor 2*16+8(%r14), RL2, RL2;
13913+ vpxor 3*16+8(%r14), RR3, RR3;
13914+ vpxor 4*16+8(%r14), RL3, RL3;
13915+ vpxor 5*16+8(%r14), RR4, RR4;
13916+ vpxor 6*16+8(%r14), RL4, RL4;
13917
13918 vmovdqu RR1, (0*16)(%r11);
13919 vmovdqu RL1, (1*16)(%r11);
13920@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13921 vmovdqu RR4, (6*16)(%r11);
13922 vmovdqu RL4, (7*16)(%r11);
13923
13924- popq %r12;
13925+ popq %r14;
13926
13927+ pax_force_retaddr
13928 ret;
13929 ENDPROC(cast5_cbc_dec_16way)
13930
13931@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13932 * %rcx: iv (big endian, 64bit)
13933 */
13934
13935- pushq %r12;
13936+ pushq %r14;
13937
13938 movq %rsi, %r11;
13939- movq %rdx, %r12;
13940+ movq %rdx, %r14;
13941
13942 vpcmpeqd RTMP, RTMP, RTMP;
13943 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13944@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13945 call __cast5_enc_blk16;
13946
13947 /* dst = src ^ iv */
13948- vpxor (0*16)(%r12), RR1, RR1;
13949- vpxor (1*16)(%r12), RL1, RL1;
13950- vpxor (2*16)(%r12), RR2, RR2;
13951- vpxor (3*16)(%r12), RL2, RL2;
13952- vpxor (4*16)(%r12), RR3, RR3;
13953- vpxor (5*16)(%r12), RL3, RL3;
13954- vpxor (6*16)(%r12), RR4, RR4;
13955- vpxor (7*16)(%r12), RL4, RL4;
13956+ vpxor (0*16)(%r14), RR1, RR1;
13957+ vpxor (1*16)(%r14), RL1, RL1;
13958+ vpxor (2*16)(%r14), RR2, RR2;
13959+ vpxor (3*16)(%r14), RL2, RL2;
13960+ vpxor (4*16)(%r14), RR3, RR3;
13961+ vpxor (5*16)(%r14), RL3, RL3;
13962+ vpxor (6*16)(%r14), RR4, RR4;
13963+ vpxor (7*16)(%r14), RL4, RL4;
13964 vmovdqu RR1, (0*16)(%r11);
13965 vmovdqu RL1, (1*16)(%r11);
13966 vmovdqu RR2, (2*16)(%r11);
13967@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13968 vmovdqu RR4, (6*16)(%r11);
13969 vmovdqu RL4, (7*16)(%r11);
13970
13971- popq %r12;
13972+ popq %r14;
13973
13974+ pax_force_retaddr
13975 ret;
13976 ENDPROC(cast5_ctr_16way)
13977diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13978index e3531f8..e123f35 100644
13979--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13980+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13981@@ -24,6 +24,7 @@
13982 */
13983
13984 #include <linux/linkage.h>
13985+#include <asm/alternative-asm.h>
13986 #include "glue_helper-asm-avx.S"
13987
13988 .file "cast6-avx-x86_64-asm_64.S"
13989@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13990 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13991 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13992
13993+ pax_force_retaddr
13994 ret;
13995 ENDPROC(__cast6_enc_blk8)
13996
13997@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13998 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13999 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14000
14001+ pax_force_retaddr
14002 ret;
14003 ENDPROC(__cast6_dec_blk8)
14004
14005@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14006
14007 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14008
14009+ pax_force_retaddr
14010 ret;
14011 ENDPROC(cast6_ecb_enc_8way)
14012
14013@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14014
14015 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14016
14017+ pax_force_retaddr
14018 ret;
14019 ENDPROC(cast6_ecb_dec_8way)
14020
14021@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14022 * %rdx: src
14023 */
14024
14025- pushq %r12;
14026+ pushq %r14;
14027
14028 movq %rsi, %r11;
14029- movq %rdx, %r12;
14030+ movq %rdx, %r14;
14031
14032 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14033
14034 call __cast6_dec_blk8;
14035
14036- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14037+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14038
14039- popq %r12;
14040+ popq %r14;
14041
14042+ pax_force_retaddr
14043 ret;
14044 ENDPROC(cast6_cbc_dec_8way)
14045
14046@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14047 * %rcx: iv (little endian, 128bit)
14048 */
14049
14050- pushq %r12;
14051+ pushq %r14;
14052
14053 movq %rsi, %r11;
14054- movq %rdx, %r12;
14055+ movq %rdx, %r14;
14056
14057 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14058 RD2, RX, RKR, RKM);
14059
14060 call __cast6_enc_blk8;
14061
14062- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14063+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14064
14065- popq %r12;
14066+ popq %r14;
14067
14068+ pax_force_retaddr
14069 ret;
14070 ENDPROC(cast6_ctr_8way)
14071
14072@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14073 /* dst <= regs xor IVs(in dst) */
14074 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14075
14076+ pax_force_retaddr
14077 ret;
14078 ENDPROC(cast6_xts_enc_8way)
14079
14080@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14081 /* dst <= regs xor IVs(in dst) */
14082 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14083
14084+ pax_force_retaddr
14085 ret;
14086 ENDPROC(cast6_xts_dec_8way)
14087diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14088index dbc4339..de6e120 100644
14089--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14090+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14091@@ -45,6 +45,7 @@
14092
14093 #include <asm/inst.h>
14094 #include <linux/linkage.h>
14095+#include <asm/alternative-asm.h>
14096
14097 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14098
14099@@ -312,6 +313,7 @@ do_return:
14100 popq %rsi
14101 popq %rdi
14102 popq %rbx
14103+ pax_force_retaddr
14104 ret
14105
14106 ################################################################
14107diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14108index 185fad4..ff4cd36 100644
14109--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14110+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14111@@ -18,6 +18,7 @@
14112
14113 #include <linux/linkage.h>
14114 #include <asm/inst.h>
14115+#include <asm/alternative-asm.h>
14116
14117 .data
14118
14119@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14120 psrlq $1, T2
14121 pxor T2, T1
14122 pxor T1, DATA
14123+ pax_force_retaddr
14124 ret
14125 ENDPROC(__clmul_gf128mul_ble)
14126
14127@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14128 call __clmul_gf128mul_ble
14129 PSHUFB_XMM BSWAP DATA
14130 movups DATA, (%rdi)
14131+ pax_force_retaddr
14132 ret
14133 ENDPROC(clmul_ghash_mul)
14134
14135@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14136 PSHUFB_XMM BSWAP DATA
14137 movups DATA, (%rdi)
14138 .Lupdate_just_ret:
14139+ pax_force_retaddr
14140 ret
14141 ENDPROC(clmul_ghash_update)
14142diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14143index 9279e0b..c4b3d2c 100644
14144--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14145+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14146@@ -1,4 +1,5 @@
14147 #include <linux/linkage.h>
14148+#include <asm/alternative-asm.h>
14149
14150 # enter salsa20_encrypt_bytes
14151 ENTRY(salsa20_encrypt_bytes)
14152@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14153 add %r11,%rsp
14154 mov %rdi,%rax
14155 mov %rsi,%rdx
14156+ pax_force_retaddr
14157 ret
14158 # bytesatleast65:
14159 ._bytesatleast65:
14160@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14161 add %r11,%rsp
14162 mov %rdi,%rax
14163 mov %rsi,%rdx
14164+ pax_force_retaddr
14165 ret
14166 ENDPROC(salsa20_keysetup)
14167
14168@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14169 add %r11,%rsp
14170 mov %rdi,%rax
14171 mov %rsi,%rdx
14172+ pax_force_retaddr
14173 ret
14174 ENDPROC(salsa20_ivsetup)
14175diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14176index 2f202f4..d9164d6 100644
14177--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14178+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14179@@ -24,6 +24,7 @@
14180 */
14181
14182 #include <linux/linkage.h>
14183+#include <asm/alternative-asm.h>
14184 #include "glue_helper-asm-avx.S"
14185
14186 .file "serpent-avx-x86_64-asm_64.S"
14187@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14188 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14189 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14190
14191+ pax_force_retaddr
14192 ret;
14193 ENDPROC(__serpent_enc_blk8_avx)
14194
14195@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14196 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14197 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14198
14199+ pax_force_retaddr
14200 ret;
14201 ENDPROC(__serpent_dec_blk8_avx)
14202
14203@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14204
14205 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14206
14207+ pax_force_retaddr
14208 ret;
14209 ENDPROC(serpent_ecb_enc_8way_avx)
14210
14211@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14212
14213 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14214
14215+ pax_force_retaddr
14216 ret;
14217 ENDPROC(serpent_ecb_dec_8way_avx)
14218
14219@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14220
14221 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14222
14223+ pax_force_retaddr
14224 ret;
14225 ENDPROC(serpent_cbc_dec_8way_avx)
14226
14227@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14228
14229 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14230
14231+ pax_force_retaddr
14232 ret;
14233 ENDPROC(serpent_ctr_8way_avx)
14234
14235@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14236 /* dst <= regs xor IVs(in dst) */
14237 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14238
14239+ pax_force_retaddr
14240 ret;
14241 ENDPROC(serpent_xts_enc_8way_avx)
14242
14243@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14244 /* dst <= regs xor IVs(in dst) */
14245 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14246
14247+ pax_force_retaddr
14248 ret;
14249 ENDPROC(serpent_xts_dec_8way_avx)
14250diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14251index b222085..abd483c 100644
14252--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14253+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14254@@ -15,6 +15,7 @@
14255 */
14256
14257 #include <linux/linkage.h>
14258+#include <asm/alternative-asm.h>
14259 #include "glue_helper-asm-avx2.S"
14260
14261 .file "serpent-avx2-asm_64.S"
14262@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14263 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14264 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14265
14266+ pax_force_retaddr
14267 ret;
14268 ENDPROC(__serpent_enc_blk16)
14269
14270@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14271 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14272 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14273
14274+ pax_force_retaddr
14275 ret;
14276 ENDPROC(__serpent_dec_blk16)
14277
14278@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14279
14280 vzeroupper;
14281
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(serpent_ecb_enc_16way)
14285
14286@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14287
14288 vzeroupper;
14289
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(serpent_ecb_dec_16way)
14293
14294@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14295
14296 vzeroupper;
14297
14298+ pax_force_retaddr
14299 ret;
14300 ENDPROC(serpent_cbc_dec_16way)
14301
14302@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14303
14304 vzeroupper;
14305
14306+ pax_force_retaddr
14307 ret;
14308 ENDPROC(serpent_ctr_16way)
14309
14310@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14311
14312 vzeroupper;
14313
14314+ pax_force_retaddr
14315 ret;
14316 ENDPROC(serpent_xts_enc_16way)
14317
14318@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14319
14320 vzeroupper;
14321
14322+ pax_force_retaddr
14323 ret;
14324 ENDPROC(serpent_xts_dec_16way)
14325diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14326index acc066c..1559cc4 100644
14327--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14328+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14329@@ -25,6 +25,7 @@
14330 */
14331
14332 #include <linux/linkage.h>
14333+#include <asm/alternative-asm.h>
14334
14335 .file "serpent-sse2-x86_64-asm_64.S"
14336 .text
14337@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14338 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14339 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14340
14341+ pax_force_retaddr
14342 ret;
14343
14344 .L__enc_xor8:
14345 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14346 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14347
14348+ pax_force_retaddr
14349 ret;
14350 ENDPROC(__serpent_enc_blk_8way)
14351
14352@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14353 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14354 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14355
14356+ pax_force_retaddr
14357 ret;
14358 ENDPROC(serpent_dec_blk_8way)
14359diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14360index a410950..9dfe7ad 100644
14361--- a/arch/x86/crypto/sha1_ssse3_asm.S
14362+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14363@@ -29,6 +29,7 @@
14364 */
14365
14366 #include <linux/linkage.h>
14367+#include <asm/alternative-asm.h>
14368
14369 #define CTX %rdi // arg1
14370 #define BUF %rsi // arg2
14371@@ -75,9 +76,9 @@
14372
14373 push %rbx
14374 push %rbp
14375- push %r12
14376+ push %r14
14377
14378- mov %rsp, %r12
14379+ mov %rsp, %r14
14380 sub $64, %rsp # allocate workspace
14381 and $~15, %rsp # align stack
14382
14383@@ -99,11 +100,12 @@
14384 xor %rax, %rax
14385 rep stosq
14386
14387- mov %r12, %rsp # deallocate workspace
14388+ mov %r14, %rsp # deallocate workspace
14389
14390- pop %r12
14391+ pop %r14
14392 pop %rbp
14393 pop %rbx
14394+ pax_force_retaddr
14395 ret
14396
14397 ENDPROC(\name)
14398diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14399index 642f156..51a513c 100644
14400--- a/arch/x86/crypto/sha256-avx-asm.S
14401+++ b/arch/x86/crypto/sha256-avx-asm.S
14402@@ -49,6 +49,7 @@
14403
14404 #ifdef CONFIG_AS_AVX
14405 #include <linux/linkage.h>
14406+#include <asm/alternative-asm.h>
14407
14408 ## assume buffers not aligned
14409 #define VMOVDQ vmovdqu
14410@@ -460,6 +461,7 @@ done_hash:
14411 popq %r13
14412 popq %rbp
14413 popq %rbx
14414+ pax_force_retaddr
14415 ret
14416 ENDPROC(sha256_transform_avx)
14417
14418diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14419index 9e86944..3795e6a 100644
14420--- a/arch/x86/crypto/sha256-avx2-asm.S
14421+++ b/arch/x86/crypto/sha256-avx2-asm.S
14422@@ -50,6 +50,7 @@
14423
14424 #ifdef CONFIG_AS_AVX2
14425 #include <linux/linkage.h>
14426+#include <asm/alternative-asm.h>
14427
14428 ## assume buffers not aligned
14429 #define VMOVDQ vmovdqu
14430@@ -720,6 +721,7 @@ done_hash:
14431 popq %r12
14432 popq %rbp
14433 popq %rbx
14434+ pax_force_retaddr
14435 ret
14436 ENDPROC(sha256_transform_rorx)
14437
14438diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14439index f833b74..8c62a9e 100644
14440--- a/arch/x86/crypto/sha256-ssse3-asm.S
14441+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14442@@ -47,6 +47,7 @@
14443 ########################################################################
14444
14445 #include <linux/linkage.h>
14446+#include <asm/alternative-asm.h>
14447
14448 ## assume buffers not aligned
14449 #define MOVDQ movdqu
14450@@ -471,6 +472,7 @@ done_hash:
14451 popq %rbp
14452 popq %rbx
14453
14454+ pax_force_retaddr
14455 ret
14456 ENDPROC(sha256_transform_ssse3)
14457
14458diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14459index 974dde9..a823ff9 100644
14460--- a/arch/x86/crypto/sha512-avx-asm.S
14461+++ b/arch/x86/crypto/sha512-avx-asm.S
14462@@ -49,6 +49,7 @@
14463
14464 #ifdef CONFIG_AS_AVX
14465 #include <linux/linkage.h>
14466+#include <asm/alternative-asm.h>
14467
14468 .text
14469
14470@@ -364,6 +365,7 @@ updateblock:
14471 mov frame_RSPSAVE(%rsp), %rsp
14472
14473 nowork:
14474+ pax_force_retaddr
14475 ret
14476 ENDPROC(sha512_transform_avx)
14477
14478diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14479index 568b961..ed20c37 100644
14480--- a/arch/x86/crypto/sha512-avx2-asm.S
14481+++ b/arch/x86/crypto/sha512-avx2-asm.S
14482@@ -51,6 +51,7 @@
14483
14484 #ifdef CONFIG_AS_AVX2
14485 #include <linux/linkage.h>
14486+#include <asm/alternative-asm.h>
14487
14488 .text
14489
14490@@ -678,6 +679,7 @@ done_hash:
14491
14492 # Restore Stack Pointer
14493 mov frame_RSPSAVE(%rsp), %rsp
14494+ pax_force_retaddr
14495 ret
14496 ENDPROC(sha512_transform_rorx)
14497
14498diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14499index fb56855..6edd768 100644
14500--- a/arch/x86/crypto/sha512-ssse3-asm.S
14501+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14502@@ -48,6 +48,7 @@
14503 ########################################################################
14504
14505 #include <linux/linkage.h>
14506+#include <asm/alternative-asm.h>
14507
14508 .text
14509
14510@@ -363,6 +364,7 @@ updateblock:
14511 mov frame_RSPSAVE(%rsp), %rsp
14512
14513 nowork:
14514+ pax_force_retaddr
14515 ret
14516 ENDPROC(sha512_transform_ssse3)
14517
14518diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14519index 0505813..b067311 100644
14520--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14521+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14522@@ -24,6 +24,7 @@
14523 */
14524
14525 #include <linux/linkage.h>
14526+#include <asm/alternative-asm.h>
14527 #include "glue_helper-asm-avx.S"
14528
14529 .file "twofish-avx-x86_64-asm_64.S"
14530@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14531 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14532 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14533
14534+ pax_force_retaddr
14535 ret;
14536 ENDPROC(__twofish_enc_blk8)
14537
14538@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14539 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14540 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14541
14542+ pax_force_retaddr
14543 ret;
14544 ENDPROC(__twofish_dec_blk8)
14545
14546@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14547
14548 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14549
14550+ pax_force_retaddr
14551 ret;
14552 ENDPROC(twofish_ecb_enc_8way)
14553
14554@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14555
14556 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14557
14558+ pax_force_retaddr
14559 ret;
14560 ENDPROC(twofish_ecb_dec_8way)
14561
14562@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14563 * %rdx: src
14564 */
14565
14566- pushq %r12;
14567+ pushq %r14;
14568
14569 movq %rsi, %r11;
14570- movq %rdx, %r12;
14571+ movq %rdx, %r14;
14572
14573 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14574
14575 call __twofish_dec_blk8;
14576
14577- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14578+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14579
14580- popq %r12;
14581+ popq %r14;
14582
14583+ pax_force_retaddr
14584 ret;
14585 ENDPROC(twofish_cbc_dec_8way)
14586
14587@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14588 * %rcx: iv (little endian, 128bit)
14589 */
14590
14591- pushq %r12;
14592+ pushq %r14;
14593
14594 movq %rsi, %r11;
14595- movq %rdx, %r12;
14596+ movq %rdx, %r14;
14597
14598 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14599 RD2, RX0, RX1, RY0);
14600
14601 call __twofish_enc_blk8;
14602
14603- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14604+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14605
14606- popq %r12;
14607+ popq %r14;
14608
14609+ pax_force_retaddr
14610 ret;
14611 ENDPROC(twofish_ctr_8way)
14612
14613@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14614 /* dst <= regs xor IVs(in dst) */
14615 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14616
14617+ pax_force_retaddr
14618 ret;
14619 ENDPROC(twofish_xts_enc_8way)
14620
14621@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14622 /* dst <= regs xor IVs(in dst) */
14623 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14624
14625+ pax_force_retaddr
14626 ret;
14627 ENDPROC(twofish_xts_dec_8way)
14628diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14629index 1c3b7ce..02f578d 100644
14630--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14631+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14632@@ -21,6 +21,7 @@
14633 */
14634
14635 #include <linux/linkage.h>
14636+#include <asm/alternative-asm.h>
14637
14638 .file "twofish-x86_64-asm-3way.S"
14639 .text
14640@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14641 popq %r13;
14642 popq %r14;
14643 popq %r15;
14644+ pax_force_retaddr
14645 ret;
14646
14647 .L__enc_xor3:
14648@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14649 popq %r13;
14650 popq %r14;
14651 popq %r15;
14652+ pax_force_retaddr
14653 ret;
14654 ENDPROC(__twofish_enc_blk_3way)
14655
14656@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14657 popq %r13;
14658 popq %r14;
14659 popq %r15;
14660+ pax_force_retaddr
14661 ret;
14662 ENDPROC(twofish_dec_blk_3way)
14663diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14664index a039d21..524b8b2 100644
14665--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14666+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14667@@ -22,6 +22,7 @@
14668
14669 #include <linux/linkage.h>
14670 #include <asm/asm-offsets.h>
14671+#include <asm/alternative-asm.h>
14672
14673 #define a_offset 0
14674 #define b_offset 4
14675@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14676
14677 popq R1
14678 movq $1,%rax
14679+ pax_force_retaddr
14680 ret
14681 ENDPROC(twofish_enc_blk)
14682
14683@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14684
14685 popq R1
14686 movq $1,%rax
14687+ pax_force_retaddr
14688 ret
14689 ENDPROC(twofish_dec_blk)
14690diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14691index d21ff89..6da8e6e 100644
14692--- a/arch/x86/ia32/ia32_aout.c
14693+++ b/arch/x86/ia32/ia32_aout.c
14694@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14695 unsigned long dump_start, dump_size;
14696 struct user32 dump;
14697
14698+ memset(&dump, 0, sizeof(dump));
14699+
14700 fs = get_fs();
14701 set_fs(KERNEL_DS);
14702 has_dumped = 1;
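The one-line memset() added above is an infoleak fix: aout_core_dump() fills in only some fields of the on-stack struct user32 before writing the whole structure to the core file, so any field or padding byte it skips would otherwise carry stale kernel-stack contents out to disk. A userspace sketch of the same pattern (struct and field names hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct dump_hdr {
        int  magic;
        char pad[12];      /* stand-in for fields the dumper never sets */
    };

    static void write_record(struct dump_hdr *d, FILE *out)
    {
        d->magic = 0x01010101;          /* only one field is filled in... */
        fwrite(d, sizeof(*d), 1, out);  /* ...but the whole struct is written */
    }

    int main(void)
    {
        struct dump_hdr d;              /* uninitialized: stale stack bytes */
        memset(&d, 0, sizeof(d));       /* the patch's fix: zero it all first */
        write_record(&d, stdout);
        return 0;
    }
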
14703diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14704index 2206757..85cbcfa 100644
14705--- a/arch/x86/ia32/ia32_signal.c
14706+++ b/arch/x86/ia32/ia32_signal.c
14707@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14708 if (__get_user(set.sig[0], &frame->sc.oldmask)
14709 || (_COMPAT_NSIG_WORDS > 1
14710 && __copy_from_user((((char *) &set.sig) + 4),
14711- &frame->extramask,
14712+ frame->extramask,
14713 sizeof(frame->extramask))))
14714 goto badframe;
14715
14716@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14717 sp -= frame_size;
14718 /* Align the stack pointer according to the i386 ABI,
14719 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14720- sp = ((sp + 4) & -16ul) - 4;
14721+ sp = ((sp - 12) & -16ul) - 4;
14722 return (void __user *) sp;
14723 }
14724
14725@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14726 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14727 sigreturn);
14728 else
14729- restorer = &frame->retcode;
14730+ restorer = frame->retcode;
14731 }
14732
14733 put_user_try {
14734@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14735 * These are actually not used anymore, but left because some
14736 * gdb versions depend on them as a marker.
14737 */
14738- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14739+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14740 } put_user_catch(err);
14741
14742 if (err)
14743@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14744 0xb8,
14745 __NR_ia32_rt_sigreturn,
14746 0x80cd,
14747- 0,
14748+ 0
14749 };
14750
14751 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14752@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14753
14754 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14755 restorer = ksig->ka.sa.sa_restorer;
14756+ else if (current->mm->context.vdso)
14757+ /* Return stub is in 32bit vsyscall page */
14758+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14759 else
14760- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14761- rt_sigreturn);
14762+ restorer = frame->retcode;
14763 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14764
14765 /*
14766 * Not actually used anymore, but left because some gdb
14767 * versions need it.
14768 */
14769- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14770+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14771 } put_user_catch(err);
14772
14773 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
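The get_sigframe() change swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Both expressions satisfy the i386 ABI requirement quoted in the comment, ((sp + 4) & 15) == 0; the difference is that the new form always rounds strictly downward, never leaving sp exactly where it was (that reading of the intent is mine, the patch does not state a rationale). A small check of both invariants:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 0x1000; sp < 0x1020; sp++) {
            unsigned long old_ = ((sp +  4) & -16ul) - 4;
            unsigned long new_ = ((sp - 12) & -16ul) - 4;
            assert((old_ + 4) % 16 == 0);   /* both forms are ABI-aligned */
            assert((new_ + 4) % 16 == 0);
            assert(old_ <= sp);             /* old form may stay exactly at sp */
            assert(new_ <  sp);             /* new form always moves down */
        }
        puts("alignment invariants hold");
        return 0;
    }
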
14774diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14775index 4299eb0..c0687a7 100644
14776--- a/arch/x86/ia32/ia32entry.S
14777+++ b/arch/x86/ia32/ia32entry.S
14778@@ -15,8 +15,10 @@
14779 #include <asm/irqflags.h>
14780 #include <asm/asm.h>
14781 #include <asm/smap.h>
14782+#include <asm/pgtable.h>
14783 #include <linux/linkage.h>
14784 #include <linux/err.h>
14785+#include <asm/alternative-asm.h>
14786
14787 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14788 #include <linux/elf-em.h>
14789@@ -62,12 +64,12 @@
14790 */
14791 .macro LOAD_ARGS32 offset, _r9=0
14792 .if \_r9
14793- movl \offset+16(%rsp),%r9d
14794+ movl \offset+R9(%rsp),%r9d
14795 .endif
14796- movl \offset+40(%rsp),%ecx
14797- movl \offset+48(%rsp),%edx
14798- movl \offset+56(%rsp),%esi
14799- movl \offset+64(%rsp),%edi
14800+ movl \offset+RCX(%rsp),%ecx
14801+ movl \offset+RDX(%rsp),%edx
14802+ movl \offset+RSI(%rsp),%esi
14803+ movl \offset+RDI(%rsp),%edi
14804 movl %eax,%eax /* zero extension */
14805 .endm
14806
14807@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14808 ENDPROC(native_irq_enable_sysexit)
14809 #endif
14810
14811+ .macro pax_enter_kernel_user
14812+ pax_set_fptr_mask
14813+#ifdef CONFIG_PAX_MEMORY_UDEREF
14814+ call pax_enter_kernel_user
14815+#endif
14816+ .endm
14817+
14818+ .macro pax_exit_kernel_user
14819+#ifdef CONFIG_PAX_MEMORY_UDEREF
14820+ call pax_exit_kernel_user
14821+#endif
14822+#ifdef CONFIG_PAX_RANDKSTACK
14823+ pushq %rax
14824+ pushq %r11
14825+ call pax_randomize_kstack
14826+ popq %r11
14827+ popq %rax
14828+#endif
14829+ .endm
14830+
14831+ .macro pax_erase_kstack
14832+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14833+ call pax_erase_kstack
14834+#endif
14835+ .endm
14836+
14837 /*
14838 * 32bit SYSENTER instruction entry.
14839 *
14840@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14841 CFI_REGISTER rsp,rbp
14842 SWAPGS_UNSAFE_STACK
14843 movq PER_CPU_VAR(kernel_stack), %rsp
14844- addq $(KERNEL_STACK_OFFSET),%rsp
14845- /*
14846- * No need to follow this irqs on/off section: the syscall
14847- * disabled irqs, here we enable it straight after entry:
14848- */
14849- ENABLE_INTERRUPTS(CLBR_NONE)
14850 movl %ebp,%ebp /* zero extension */
14851 pushq_cfi $__USER32_DS
14852 /*CFI_REL_OFFSET ss,0*/
14853@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14854 CFI_REL_OFFSET rsp,0
14855 pushfq_cfi
14856 /*CFI_REL_OFFSET rflags,0*/
14857- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14858- CFI_REGISTER rip,r10
14859+ orl $X86_EFLAGS_IF,(%rsp)
14860+ GET_THREAD_INFO(%r11)
14861+ movl TI_sysenter_return(%r11), %r11d
14862+ CFI_REGISTER rip,r11
14863 pushq_cfi $__USER32_CS
14864 /*CFI_REL_OFFSET cs,0*/
14865 movl %eax, %eax
14866- pushq_cfi %r10
14867+ pushq_cfi %r11
14868 CFI_REL_OFFSET rip,0
14869 pushq_cfi %rax
14870 cld
14871 SAVE_ARGS 0,1,0
14872+ pax_enter_kernel_user
14873+
14874+#ifdef CONFIG_PAX_RANDKSTACK
14875+ pax_erase_kstack
14876+#endif
14877+
14878+ /*
14879+ * No need to follow this irqs on/off section: the syscall
14880+ * disabled irqs, here we enable it straight after entry:
14881+ */
14882+ ENABLE_INTERRUPTS(CLBR_NONE)
14883 /* no need to do an access_ok check here because rbp has been
14884 32bit zero extended */
14885+
14886+#ifdef CONFIG_PAX_MEMORY_UDEREF
14887+ addq pax_user_shadow_base,%rbp
14888+ ASM_PAX_OPEN_USERLAND
14889+#endif
14890+
14891 ASM_STAC
14892 1: movl (%rbp),%ebp
14893 _ASM_EXTABLE(1b,ia32_badarg)
14894 ASM_CLAC
14895- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14896- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14897+
14898+#ifdef CONFIG_PAX_MEMORY_UDEREF
14899+ ASM_PAX_CLOSE_USERLAND
14900+#endif
14901+
14902+ GET_THREAD_INFO(%r11)
14903+ orl $TS_COMPAT,TI_status(%r11)
14904+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14905 CFI_REMEMBER_STATE
14906 jnz sysenter_tracesys
14907 cmpq $(IA32_NR_syscalls-1),%rax
14908@@ -162,15 +209,18 @@ sysenter_do_call:
14909 sysenter_dispatch:
14910 call *ia32_sys_call_table(,%rax,8)
14911 movq %rax,RAX-ARGOFFSET(%rsp)
14912+ GET_THREAD_INFO(%r11)
14913 DISABLE_INTERRUPTS(CLBR_NONE)
14914 TRACE_IRQS_OFF
14915- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14916+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14917 jnz sysexit_audit
14918 sysexit_from_sys_call:
14919- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14920+ pax_exit_kernel_user
14921+ pax_erase_kstack
14922+ andl $~TS_COMPAT,TI_status(%r11)
14923 /* clear IF, that popfq doesn't enable interrupts early */
14924- andl $~0x200,EFLAGS-R11(%rsp)
14925- movl RIP-R11(%rsp),%edx /* User %eip */
14926+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14927+ movl RIP(%rsp),%edx /* User %eip */
14928 CFI_REGISTER rip,rdx
14929 RESTORE_ARGS 0,24,0,0,0,0
14930 xorq %r8,%r8
14931@@ -193,6 +243,9 @@ sysexit_from_sys_call:
14932 movl %eax,%esi /* 2nd arg: syscall number */
14933 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
14934 call __audit_syscall_entry
14935+
14936+ pax_erase_kstack
14937+
14938 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14939 cmpq $(IA32_NR_syscalls-1),%rax
14940 ja ia32_badsys
14941@@ -204,7 +257,7 @@ sysexit_from_sys_call:
14942 .endm
14943
14944 .macro auditsys_exit exit
14945- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14947 jnz ia32_ret_from_sys_call
14948 TRACE_IRQS_ON
14949 ENABLE_INTERRUPTS(CLBR_NONE)
14950@@ -215,11 +268,12 @@ sysexit_from_sys_call:
14951 1: setbe %al /* 1 if error, 0 if not */
14952 movzbl %al,%edi /* zero-extend that into %edi */
14953 call __audit_syscall_exit
14954+ GET_THREAD_INFO(%r11)
14955 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14956 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14957 DISABLE_INTERRUPTS(CLBR_NONE)
14958 TRACE_IRQS_OFF
14959- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14960+ testl %edi,TI_flags(%r11)
14961 jz \exit
14962 CLEAR_RREGS -ARGOFFSET
14963 jmp int_with_check
14964@@ -237,7 +291,7 @@ sysexit_audit:
14965
14966 sysenter_tracesys:
14967 #ifdef CONFIG_AUDITSYSCALL
14968- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14969+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14970 jz sysenter_auditsys
14971 #endif
14972 SAVE_REST
14973@@ -249,6 +303,9 @@ sysenter_tracesys:
14974 RESTORE_REST
14975 cmpq $(IA32_NR_syscalls-1),%rax
14976 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14977+
14978+ pax_erase_kstack
14979+
14980 jmp sysenter_do_call
14981 CFI_ENDPROC
14982 ENDPROC(ia32_sysenter_target)
14983@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14984 ENTRY(ia32_cstar_target)
14985 CFI_STARTPROC32 simple
14986 CFI_SIGNAL_FRAME
14987- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14988+ CFI_DEF_CFA rsp,0
14989 CFI_REGISTER rip,rcx
14990 /*CFI_REGISTER rflags,r11*/
14991 SWAPGS_UNSAFE_STACK
14992 movl %esp,%r8d
14993 CFI_REGISTER rsp,r8
14994 movq PER_CPU_VAR(kernel_stack),%rsp
14995+ SAVE_ARGS 8*6,0,0
14996+ pax_enter_kernel_user
14997+
14998+#ifdef CONFIG_PAX_RANDKSTACK
14999+ pax_erase_kstack
15000+#endif
15001+
15002 /*
15003 * No need to follow this irqs on/off section: the syscall
15004 * disabled irqs and here we enable it straight after entry:
15005 */
15006 ENABLE_INTERRUPTS(CLBR_NONE)
15007- SAVE_ARGS 8,0,0
15008 movl %eax,%eax /* zero extension */
15009 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15010 movq %rcx,RIP-ARGOFFSET(%rsp)
15011@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15012 /* no need to do an access_ok check here because r8 has been
15013 32bit zero extended */
15014 /* hardware stack frame is complete now */
15015+
15016+#ifdef CONFIG_PAX_MEMORY_UDEREF
15017+ ASM_PAX_OPEN_USERLAND
15018+ movq pax_user_shadow_base,%r8
15019+ addq RSP-ARGOFFSET(%rsp),%r8
15020+#endif
15021+
15022 ASM_STAC
15023 1: movl (%r8),%r9d
15024 _ASM_EXTABLE(1b,ia32_badarg)
15025 ASM_CLAC
15026- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15027- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15028+
15029+#ifdef CONFIG_PAX_MEMORY_UDEREF
15030+ ASM_PAX_CLOSE_USERLAND
15031+#endif
15032+
15033+ GET_THREAD_INFO(%r11)
15034+ orl $TS_COMPAT,TI_status(%r11)
15035+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15036 CFI_REMEMBER_STATE
15037 jnz cstar_tracesys
15038 cmpq $IA32_NR_syscalls-1,%rax
15039@@ -319,13 +395,16 @@ cstar_do_call:
15040 cstar_dispatch:
15041 call *ia32_sys_call_table(,%rax,8)
15042 movq %rax,RAX-ARGOFFSET(%rsp)
15043+ GET_THREAD_INFO(%r11)
15044 DISABLE_INTERRUPTS(CLBR_NONE)
15045 TRACE_IRQS_OFF
15046- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15047+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15048 jnz sysretl_audit
15049 sysretl_from_sys_call:
15050- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15051- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15052+ pax_exit_kernel_user
15053+ pax_erase_kstack
15054+ andl $~TS_COMPAT,TI_status(%r11)
15055+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15056 movl RIP-ARGOFFSET(%rsp),%ecx
15057 CFI_REGISTER rip,rcx
15058 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15059@@ -352,7 +431,7 @@ sysretl_audit:
15060
15061 cstar_tracesys:
15062 #ifdef CONFIG_AUDITSYSCALL
15063- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15064+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15065 jz cstar_auditsys
15066 #endif
15067 xchgl %r9d,%ebp
15068@@ -366,11 +445,19 @@ cstar_tracesys:
15069 xchgl %ebp,%r9d
15070 cmpq $(IA32_NR_syscalls-1),%rax
15071 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15072+
15073+ pax_erase_kstack
15074+
15075 jmp cstar_do_call
15076 END(ia32_cstar_target)
15077
15078 ia32_badarg:
15079 ASM_CLAC
15080+
15081+#ifdef CONFIG_PAX_MEMORY_UDEREF
15082+ ASM_PAX_CLOSE_USERLAND
15083+#endif
15084+
15085 movq $-EFAULT,%rax
15086 jmp ia32_sysret
15087 CFI_ENDPROC
15088@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15089 CFI_REL_OFFSET rip,RIP-RIP
15090 PARAVIRT_ADJUST_EXCEPTION_FRAME
15091 SWAPGS
15092- /*
15093- * No need to follow this irqs on/off section: the syscall
15094- * disabled irqs and here we enable it straight after entry:
15095- */
15096- ENABLE_INTERRUPTS(CLBR_NONE)
15097 movl %eax,%eax
15098 pushq_cfi %rax
15099 cld
15100 /* note the registers are not zero extended to the sf.
15101 this could be a problem. */
15102 SAVE_ARGS 0,1,0
15103- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15104- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15105+ pax_enter_kernel_user
15106+
15107+#ifdef CONFIG_PAX_RANDKSTACK
15108+ pax_erase_kstack
15109+#endif
15110+
15111+ /*
15112+ * No need to follow this irqs on/off section: the syscall
15113+ * disabled irqs and here we enable it straight after entry:
15114+ */
15115+ ENABLE_INTERRUPTS(CLBR_NONE)
15116+ GET_THREAD_INFO(%r11)
15117+ orl $TS_COMPAT,TI_status(%r11)
15118+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15119 jnz ia32_tracesys
15120 cmpq $(IA32_NR_syscalls-1),%rax
15121 ja ia32_badsys
15122@@ -442,6 +536,9 @@ ia32_tracesys:
15123 RESTORE_REST
15124 cmpq $(IA32_NR_syscalls-1),%rax
15125 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15126+
15127+ pax_erase_kstack
15128+
15129 jmp ia32_do_call
15130 END(ia32_syscall)
15131
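A small but telling detail in the ia32entry.S hunks above: LOAD_ARGS32 stops using hardcoded byte offsets (16, 40, 48, 56, 64) and switches to the symbolic R9/RCX/RDX/RSI/RDI constants. Since the patch also changes how much SAVE_ARGS stores (e.g. SAVE_ARGS 8*6,0,0 in the cstar path), hardcoded offsets would silently go stale; generated constants track the frame layout. In the kernel those constants come from asm-offsets; the equivalent C idiom is offsetof, sketched below (the struct layout is hypothetical, not the kernel's actual pt_regs):

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for a saved-register frame. */
    struct fake_regs {
        unsigned long r9, rcx, rdx, rsi, rdi;
    };

    #define R9  offsetof(struct fake_regs, r9)
    #define RCX offsetof(struct fake_regs, rcx)
    #define RDX offsetof(struct fake_regs, rdx)
    #define RSI offsetof(struct fake_regs, rsi)
    #define RDI offsetof(struct fake_regs, rdi)

    int main(void)
    {
        /* If the struct layout changes, these offsets follow it --
         * unlike a literal 16 or 40 baked into the asm. */
        printf("R9=%zu RCX=%zu RDX=%zu RSI=%zu RDI=%zu\n",
               R9, RCX, RDX, RSI, RDI);
        return 0;
    }
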
15132diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15133index 8e0ceec..af13504 100644
15134--- a/arch/x86/ia32/sys_ia32.c
15135+++ b/arch/x86/ia32/sys_ia32.c
15136@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15137 */
15138 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15139 {
15140- typeof(ubuf->st_uid) uid = 0;
15141- typeof(ubuf->st_gid) gid = 0;
15142+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15143+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15144 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15145 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15146 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
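The cp_stat64() change replaces typeof(ubuf->st_uid) with typeof(((struct stat64 *)0)->st_uid). Both name the same type, but the second form derives it from the type alone: a null pointer inside typeof() (as inside sizeof()) sits in an unevaluated context and is never dereferenced, so the expression no longer mentions the userspace pointer ubuf at all — presumably to keep the hardening instrumentation from treating it as a user-pointer access (my reading; the patch gives no rationale). The idiom in isolation (demo struct hypothetical):

    #include <stdio.h>

    struct stat64_demo { unsigned int st_uid; unsigned int st_gid; };

    int main(void)
    {
        /* Unevaluated context: the null pointer is never dereferenced;
         * only the member's type is extracted (GCC typeof extension). */
        typeof(((struct stat64_demo *)0)->st_uid) uid = 0;
        printf("sizeof uid = %zu, value = %u\n", sizeof(uid), uid);
        return 0;
    }
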
15147diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15148index 372231c..51b537d 100644
15149--- a/arch/x86/include/asm/alternative-asm.h
15150+++ b/arch/x86/include/asm/alternative-asm.h
15151@@ -18,6 +18,45 @@
15152 .endm
15153 #endif
15154
15155+#ifdef KERNEXEC_PLUGIN
15156+ .macro pax_force_retaddr_bts rip=0
15157+ btsq $63,\rip(%rsp)
15158+ .endm
15159+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15160+ .macro pax_force_retaddr rip=0, reload=0
15161+ btsq $63,\rip(%rsp)
15162+ .endm
15163+ .macro pax_force_fptr ptr
15164+ btsq $63,\ptr
15165+ .endm
15166+ .macro pax_set_fptr_mask
15167+ .endm
15168+#endif
15169+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15170+ .macro pax_force_retaddr rip=0, reload=0
15171+ .if \reload
15172+ pax_set_fptr_mask
15173+ .endif
15174+ orq %r12,\rip(%rsp)
15175+ .endm
15176+ .macro pax_force_fptr ptr
15177+ orq %r12,\ptr
15178+ .endm
15179+ .macro pax_set_fptr_mask
15180+ movabs $0x8000000000000000,%r12
15181+ .endm
15182+#endif
15183+#else
15184+ .macro pax_force_retaddr rip=0, reload=0
15185+ .endm
15186+ .macro pax_force_fptr ptr
15187+ .endm
15188+ .macro pax_force_retaddr_bts rip=0
15189+ .endm
15190+ .macro pax_set_fptr_mask
15191+ .endm
15192+#endif
15193+
15194 .macro altinstruction_entry orig alt feature orig_len alt_len
15195 .long \orig - .
15196 .long \alt - .
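These macros are the definition behind every pax_force_retaddr sprinkled through the assembly above: just before ret, set bit 63 of the saved return address — btsq $63 under the BTS method, or orq %r12 under the OR method, with %r12 preloaded to 0x8000000000000000 by pax_set_fptr_mask. A legitimate kernel text address (0xffff...) already has bit 63 set and passes through unchanged; an overwritten return address pointing into userland becomes non-canonical and faults on ret. A userspace check of that arithmetic (addresses illustrative, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    #define KERNEXEC_MASK 0x8000000000000000ULL  /* the movabs ...,%r12 value */

    /* x86-64 canonical address: bits 63..47 must all match. */
    static int canonical(uint64_t a)
    {
        int64_t s = (int64_t)a;
        return (s >> 47) == 0 || (s >> 47) == -1;
    }

    int main(void)
    {
        uint64_t kern = 0xffffffff81000000ULL;  /* typical kernel text address */
        uint64_t user = 0x00007f0012345678ULL;  /* typical userland address */

        printf("kernel %#llx -> %#llx canonical=%d\n",
               (unsigned long long)kern,
               (unsigned long long)(kern | KERNEXEC_MASK),
               canonical(kern | KERNEXEC_MASK));   /* unchanged, still valid */
        printf("user   %#llx -> %#llx canonical=%d\n",
               (unsigned long long)user,
               (unsigned long long)(user | KERNEXEC_MASK),
               canonical(user | KERNEXEC_MASK));   /* non-canonical: ret faults */
        return 0;
    }
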
15197diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15198index 0a3f9c9..c9d081d 100644
15199--- a/arch/x86/include/asm/alternative.h
15200+++ b/arch/x86/include/asm/alternative.h
15201@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15202 ".pushsection .discard,\"aw\",@progbits\n" \
15203 DISCARD_ENTRY(1) \
15204 ".popsection\n" \
15205- ".pushsection .altinstr_replacement, \"ax\"\n" \
15206+ ".pushsection .altinstr_replacement, \"a\"\n" \
15207 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15208 ".popsection"
15209
15210@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15211 DISCARD_ENTRY(1) \
15212 DISCARD_ENTRY(2) \
15213 ".popsection\n" \
15214- ".pushsection .altinstr_replacement, \"ax\"\n" \
15215+ ".pushsection .altinstr_replacement, \"a\"\n" \
15216 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15217 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15218 ".popsection"
15219diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15220index 1d2091a..f5074c1 100644
15221--- a/arch/x86/include/asm/apic.h
15222+++ b/arch/x86/include/asm/apic.h
15223@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15224
15225 #ifdef CONFIG_X86_LOCAL_APIC
15226
15227-extern unsigned int apic_verbosity;
15228+extern int apic_verbosity;
15229 extern int local_apic_timer_c2_ok;
15230
15231 extern int disable_apic;
15232diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15233index 20370c6..a2eb9b0 100644
15234--- a/arch/x86/include/asm/apm.h
15235+++ b/arch/x86/include/asm/apm.h
15236@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15237 __asm__ __volatile__(APM_DO_ZERO_SEGS
15238 "pushl %%edi\n\t"
15239 "pushl %%ebp\n\t"
15240- "lcall *%%cs:apm_bios_entry\n\t"
15241+ "lcall *%%ss:apm_bios_entry\n\t"
15242 "setc %%al\n\t"
15243 "popl %%ebp\n\t"
15244 "popl %%edi\n\t"
15245@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15246 __asm__ __volatile__(APM_DO_ZERO_SEGS
15247 "pushl %%edi\n\t"
15248 "pushl %%ebp\n\t"
15249- "lcall *%%cs:apm_bios_entry\n\t"
15250+ "lcall *%%ss:apm_bios_entry\n\t"
15251 "setc %%bl\n\t"
15252 "popl %%ebp\n\t"
15253 "popl %%edi\n\t"
15254diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15255index b17f4f4..9620151 100644
15256--- a/arch/x86/include/asm/atomic.h
15257+++ b/arch/x86/include/asm/atomic.h
15258@@ -23,7 +23,18 @@
15259 */
15260 static inline int atomic_read(const atomic_t *v)
15261 {
15262- return (*(volatile int *)&(v)->counter);
15263+ return (*(volatile const int *)&(v)->counter);
15264+}
15265+
15266+/**
15267+ * atomic_read_unchecked - read atomic variable
15268+ * @v: pointer of type atomic_unchecked_t
15269+ *
15270+ * Atomically reads the value of @v.
15271+ */
15272+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15273+{
15274+ return (*(volatile const int *)&(v)->counter);
15275 }
15276
15277 /**
15278@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
15279 }
15280
15281 /**
15282+ * atomic_set_unchecked - set atomic variable
15283+ * @v: pointer of type atomic_unchecked_t
15284+ * @i: required value
15285+ *
15286+ * Atomically sets the value of @v to @i.
15287+ */
15288+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15289+{
15290+ v->counter = i;
15291+}
15292+
15293+/**
15294 * atomic_add - add integer to atomic variable
15295 * @i: integer value to add
15296 * @v: pointer of type atomic_t
15297@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i)
15298 */
15299 static inline void atomic_add(int i, atomic_t *v)
15300 {
15301- asm volatile(LOCK_PREFIX "addl %1,%0"
15302+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15303+
15304+#ifdef CONFIG_PAX_REFCOUNT
15305+ "jno 0f\n"
15306+ LOCK_PREFIX "subl %1,%0\n"
15307+ "int $4\n0:\n"
15308+ _ASM_EXTABLE(0b, 0b)
15309+#endif
15310+
15311+ : "+m" (v->counter)
15312+ : "ir" (i));
15313+}
15314+
15315+/**
15316+ * atomic_add_unchecked - add integer to atomic variable
15317+ * @i: integer value to add
15318+ * @v: pointer of type atomic_unchecked_t
15319+ *
15320+ * Atomically adds @i to @v.
15321+ */
15322+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15323+{
15324+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15325 : "+m" (v->counter)
15326 : "ir" (i));
15327 }
15328@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v)
15329 */
15330 static inline void atomic_sub(int i, atomic_t *v)
15331 {
15332- asm volatile(LOCK_PREFIX "subl %1,%0"
15333+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15334+
15335+#ifdef CONFIG_PAX_REFCOUNT
15336+ "jno 0f\n"
15337+ LOCK_PREFIX "addl %1,%0\n"
15338+ "int $4\n0:\n"
15339+ _ASM_EXTABLE(0b, 0b)
15340+#endif
15341+
15342+ : "+m" (v->counter)
15343+ : "ir" (i));
15344+}
15345+
15346+/**
15347+ * atomic_sub_unchecked - subtract integer from atomic variable
15348+ * @i: integer value to subtract
15349+ * @v: pointer of type atomic_unchecked_t
15350+ *
15351+ * Atomically subtracts @i from @v.
15352+ */
15353+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15354+{
15355+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15356 : "+m" (v->counter)
15357 : "ir" (i));
15358 }
15359@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15360 */
15361 static inline int atomic_sub_and_test(int i, atomic_t *v)
15362 {
15363- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15364+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15365 }
15366
15367 /**
15368@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15369 */
15370 static inline void atomic_inc(atomic_t *v)
15371 {
15372- asm volatile(LOCK_PREFIX "incl %0"
15373+ asm volatile(LOCK_PREFIX "incl %0\n"
15374+
15375+#ifdef CONFIG_PAX_REFCOUNT
15376+ "jno 0f\n"
15377+ LOCK_PREFIX "decl %0\n"
15378+ "int $4\n0:\n"
15379+ _ASM_EXTABLE(0b, 0b)
15380+#endif
15381+
15382+ : "+m" (v->counter));
15383+}
15384+
15385+/**
15386+ * atomic_inc_unchecked - increment atomic variable
15387+ * @v: pointer of type atomic_unchecked_t
15388+ *
15389+ * Atomically increments @v by 1.
15390+ */
15391+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15392+{
15393+ asm volatile(LOCK_PREFIX "incl %0\n"
15394 : "+m" (v->counter));
15395 }
15396
15397@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v)
15398 */
15399 static inline void atomic_dec(atomic_t *v)
15400 {
15401- asm volatile(LOCK_PREFIX "decl %0"
15402+ asm volatile(LOCK_PREFIX "decl %0\n"
15403+
15404+#ifdef CONFIG_PAX_REFCOUNT
15405+ "jno 0f\n"
15406+ LOCK_PREFIX "incl %0\n"
15407+ "int $4\n0:\n"
15408+ _ASM_EXTABLE(0b, 0b)
15409+#endif
15410+
15411+ : "+m" (v->counter));
15412+}
15413+
15414+/**
15415+ * atomic_dec_unchecked - decrement atomic variable
15416+ * @v: pointer of type atomic_unchecked_t
15417+ *
15418+ * Atomically decrements @v by 1.
15419+ */
15420+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15421+{
15422+ asm volatile(LOCK_PREFIX "decl %0\n"
15423 : "+m" (v->counter));
15424 }
15425
15426@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v)
15427 */
15428 static inline int atomic_dec_and_test(atomic_t *v)
15429 {
15430- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15431+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15432 }
15433
15434 /**
15435@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15436 */
15437 static inline int atomic_inc_and_test(atomic_t *v)
15438 {
15439- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15440+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15441+}
15442+
15443+/**
15444+ * atomic_inc_and_test_unchecked - increment and test
15445+ * @v: pointer of type atomic_unchecked_t
15446+ *
15447+ * Atomically increments @v by 1
15448+ * and returns true if the result is zero, or false for all
15449+ * other cases.
15450+ */
15451+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15452+{
15453+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15454 }
15455
15456 /**
15457@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15458 */
15459 static inline int atomic_add_negative(int i, atomic_t *v)
15460 {
15461- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15462+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15463 }
15464
15465 /**
15466@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15467 */
15468 static inline int atomic_add_return(int i, atomic_t *v)
15469 {
15470+ return i + xadd_check_overflow(&v->counter, i);
15471+}
15472+
15473+/**
15474+ * atomic_add_return_unchecked - add integer and return
15475+ * @i: integer value to add
15476+ * @v: pointer of type atomic_unchecked_t
15477+ *
15478+ * Atomically adds @i to @v and returns @i + @v
15479+ */
15480+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15481+{
15482 return i + xadd(&v->counter, i);
15483 }
15484
15485@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15486 }
15487
15488 #define atomic_inc_return(v) (atomic_add_return(1, v))
15489+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15490+{
15491+ return atomic_add_return_unchecked(1, v);
15492+}
15493 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15494
15495-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15496+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15497+{
15498+ return cmpxchg(&v->counter, old, new);
15499+}
15500+
15501+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15502 {
15503 return cmpxchg(&v->counter, old, new);
15504 }
15505@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15506 return xchg(&v->counter, new);
15507 }
15508
15509+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15510+{
15511+ return xchg(&v->counter, new);
15512+}
15513+
15514 /**
15515 * __atomic_add_unless - add unless the number is already a given value
15516 * @v: pointer of type atomic_t
15517@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15518 * Atomically adds @a to @v, so long as @v was not already @u.
15519 * Returns the old value of @v.
15520 */
15521-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15522+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15523 {
15524- int c, old;
15525+ int c, old, new;
15526 c = atomic_read(v);
15527 for (;;) {
15528- if (unlikely(c == (u)))
15529+ if (unlikely(c == u))
15530 break;
15531- old = atomic_cmpxchg((v), c, c + (a));
15532+
15533+ asm volatile("addl %2,%0\n"
15534+
15535+#ifdef CONFIG_PAX_REFCOUNT
15536+ "jno 0f\n"
15537+ "subl %2,%0\n"
15538+ "int $4\n0:\n"
15539+ _ASM_EXTABLE(0b, 0b)
15540+#endif
15541+
15542+ : "=r" (new)
15543+ : "0" (c), "ir" (a));
15544+
15545+ old = atomic_cmpxchg(v, c, new);
15546 if (likely(old == c))
15547 break;
15548 c = old;
15549@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15550 }
15551
15552 /**
15553+ * atomic_inc_not_zero_hint - increment if not zero
15554+ * @v: pointer of type atomic_t
15555+ * @hint: probable value of the atomic before the increment
15556+ *
15557+ * This version of atomic_inc_not_zero() gives a hint of the probable
15558+ * value of the atomic. This helps the processor avoid reading the
15559+ * memory before the atomic read/modify/write cycle, lowering the
15560+ * number of bus transactions on some arches.
15561+ *
15562+ * Returns: 0 if increment was not done, 1 otherwise.
15563+ */
15564+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15565+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15566+{
15567+ int val, c = hint, new;
15568+
15569+ /* sanity test, should be removed by compiler if hint is a constant */
15570+ if (!hint)
15571+ return __atomic_add_unless(v, 1, 0);
15572+
15573+ do {
15574+ asm volatile("incl %0\n"
15575+
15576+#ifdef CONFIG_PAX_REFCOUNT
15577+ "jno 0f\n"
15578+ "decl %0\n"
15579+ "int $4\n0:\n"
15580+ _ASM_EXTABLE(0b, 0b)
15581+#endif
15582+
15583+ : "=r" (new)
15584+ : "0" (c));
15585+
15586+ val = atomic_cmpxchg(v, c, new);
15587+ if (val == c)
15588+ return 1;
15589+ c = val;
15590+ } while (c);
15591+
15592+ return 0;
15593+}
15594+
15595+/**
15596 * atomic_inc_short - increment of a short integer
15597 * @v: pointer to type int
15598 *
15599@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15600 #endif
15601
15602 /* These are x86-specific, used by some header files */
15603-#define atomic_clear_mask(mask, addr) \
15604- asm volatile(LOCK_PREFIX "andl %0,%1" \
15605- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15606+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15607+{
15608+ asm volatile(LOCK_PREFIX "andl %1,%0"
15609+ : "+m" (v->counter)
15610+ : "r" (~(mask))
15611+ : "memory");
15612+}
15613
15614-#define atomic_set_mask(mask, addr) \
15615- asm volatile(LOCK_PREFIX "orl %0,%1" \
15616- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15617- : "memory")
15618+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15619+{
15620+ asm volatile(LOCK_PREFIX "andl %1,%0"
15621+ : "+m" (v->counter)
15622+ : "r" (~(mask))
15623+ : "memory");
15624+}
15625+
15626+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15627+{
15628+ asm volatile(LOCK_PREFIX "orl %1,%0"
15629+ : "+m" (v->counter)
15630+ : "r" (mask)
15631+ : "memory");
15632+}
15633+
15634+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15635+{
15636+ asm volatile(LOCK_PREFIX "orl %1,%0"
15637+ : "+m" (v->counter)
15638+ : "r" (mask)
15639+ : "memory");
15640+}
15641
15642 /* Atomic operations are already serializing on x86 */
15643 #define smp_mb__before_atomic_dec() barrier()
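
Every plain atomic op above gains a "jno 0f; <inverse op>; int $4; 0:" tail under CONFIG_PAX_REFCOUNT: if the signed add overflows, the asm undoes it and raises the overflow exception instead of letting a reference count wrap. A minimal single-threaded sketch of the same idea, using a compiler builtin rather than the patch's asm (names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Portable analogue of the PAX_REFCOUNT check: detect the signed
 * wrap and refuse it, where the kernel asm undoes the add and
 * raises int $4 (#OF) for the exception handler to act on. */
static int checked_inc(int *counter)
{
	int new;

	if (__builtin_add_overflow(*counter, 1, &new)) {
		fprintf(stderr, "refcount overflow caught\n");
		abort();
	}
	return *counter = new;
}

int main(void)
{
	int refs = 0x7ffffffe;

	checked_inc(&refs);	/* 0x7fffffff, still fine */
	checked_inc(&refs);	/* would wrap to INT_MIN: caught */
	return 0;
}
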
15644diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15645index b154de7..bf18a5a 100644
15646--- a/arch/x86/include/asm/atomic64_32.h
15647+++ b/arch/x86/include/asm/atomic64_32.h
15648@@ -12,6 +12,14 @@ typedef struct {
15649 u64 __aligned(8) counter;
15650 } atomic64_t;
15651
15652+#ifdef CONFIG_PAX_REFCOUNT
15653+typedef struct {
15654+ u64 __aligned(8) counter;
15655+} atomic64_unchecked_t;
15656+#else
15657+typedef atomic64_t atomic64_unchecked_t;
15658+#endif
15659+
15660 #define ATOMIC64_INIT(val) { (val) }
15661
15662 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15663@@ -37,21 +45,31 @@ typedef struct {
15664 ATOMIC64_DECL_ONE(sym##_386)
15665
15666 ATOMIC64_DECL_ONE(add_386);
15667+ATOMIC64_DECL_ONE(add_unchecked_386);
15668 ATOMIC64_DECL_ONE(sub_386);
15669+ATOMIC64_DECL_ONE(sub_unchecked_386);
15670 ATOMIC64_DECL_ONE(inc_386);
15671+ATOMIC64_DECL_ONE(inc_unchecked_386);
15672 ATOMIC64_DECL_ONE(dec_386);
15673+ATOMIC64_DECL_ONE(dec_unchecked_386);
15674 #endif
15675
15676 #define alternative_atomic64(f, out, in...) \
15677 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15678
15679 ATOMIC64_DECL(read);
15680+ATOMIC64_DECL(read_unchecked);
15681 ATOMIC64_DECL(set);
15682+ATOMIC64_DECL(set_unchecked);
15683 ATOMIC64_DECL(xchg);
15684 ATOMIC64_DECL(add_return);
15685+ATOMIC64_DECL(add_return_unchecked);
15686 ATOMIC64_DECL(sub_return);
15687+ATOMIC64_DECL(sub_return_unchecked);
15688 ATOMIC64_DECL(inc_return);
15689+ATOMIC64_DECL(inc_return_unchecked);
15690 ATOMIC64_DECL(dec_return);
15691+ATOMIC64_DECL(dec_return_unchecked);
15692 ATOMIC64_DECL(dec_if_positive);
15693 ATOMIC64_DECL(inc_not_zero);
15694 ATOMIC64_DECL(add_unless);
15695@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15696 }
15697
15698 /**
15699+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15700+ * @p: pointer to type atomic64_unchecked_t
15701+ * @o: expected value
15702+ * @n: new value
15703+ *
15704+ * Atomically sets @v to @n if it was equal to @o and returns
15705+ * the old value.
15706+ */
15707+
15708+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15709+{
15710+ return cmpxchg64(&v->counter, o, n);
15711+}
15712+
15713+/**
15714 * atomic64_xchg - xchg atomic64 variable
15715 * @v: pointer to type atomic64_t
15716 * @n: value to assign
15717@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15718 }
15719
15720 /**
15721+ * atomic64_set_unchecked - set atomic64 variable
15722+ * @v: pointer to type atomic64_unchecked_t
15723+ * @i: value to assign
15724+ *
15725+ * Atomically sets the value of @v to @i.
15726+ */
15727+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15728+{
15729+ unsigned high = (unsigned)(i >> 32);
15730+ unsigned low = (unsigned)i;
15731+ alternative_atomic64(set, /* no output */,
15732+ "S" (v), "b" (low), "c" (high)
15733+ : "eax", "edx", "memory");
15734+}
15735+
15736+/**
15737 * atomic64_read - read atomic64 variable
15738 * @v: pointer to type atomic64_t
15739 *
15740@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15741 }
15742
15743 /**
15744+ * atomic64_read_unchecked - read atomic64 variable
15745+ * @v: pointer to type atomic64_unchecked_t
15746+ *
15747+ * Atomically reads the value of @v and returns it.
15748+ */
15749+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15750+{
15751+ long long r;
15752+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15753+ return r;
15754+}
15755+
15756+/**
15757 * atomic64_add_return - add and return
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15761 return i;
15762 }
15763
15764+/**
15765+ * atomic64_add_return_unchecked - add and return
15766+ * @i: integer value to add
15767+ * @v: pointer to type atomic64_unchecked_t
15768+ *
15769+ * Atomically adds @i to @v and returns @i + *@v
15770+ */
15771+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15772+{
15773+ alternative_atomic64(add_return_unchecked,
15774+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15775+ ASM_NO_INPUT_CLOBBER("memory"));
15776+ return i;
15777+}
15778+
15779 /*
15780 * Other variants with different arithmetic operators:
15781 */
15782@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15783 return a;
15784 }
15785
15786+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15787+{
15788+ long long a;
15789+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15790+ "S" (v) : "memory", "ecx");
15791+ return a;
15792+}
15793+
15794 static inline long long atomic64_dec_return(atomic64_t *v)
15795 {
15796 long long a;
15797@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15798 }
15799
15800 /**
15801+ * atomic64_add_unchecked - add integer to atomic64 variable
15802+ * @i: integer value to add
15803+ * @v: pointer to type atomic64_unchecked_t
15804+ *
15805+ * Atomically adds @i to @v.
15806+ */
15807+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15808+{
15809+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15810+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15811+ ASM_NO_INPUT_CLOBBER("memory"));
15812+ return i;
15813+}
15814+
15815+/**
15816 * atomic64_sub - subtract the atomic64 variable
15817 * @i: integer value to subtract
15818 * @v: pointer to type atomic64_t
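
The point of the _unchecked API is visible at the top of this file: under CONFIG_PAX_REFCOUNT, atomic64_unchecked_t is a distinct struct, so counters that are allowed to wrap (statistics, sequence numbers) must be explicitly annotated and can never reach the overflow-trapping ops by accident. A minimal sketch of that type-split idea, with demo names rather than the kernel's:

/* Distinct struct types turn accidental mixing of checked and
 * unchecked counters into a compile-time diagnostic. */
typedef struct { long counter; } atomic_demo_t;
typedef struct { long counter; } atomic_unchecked_demo_t;

static void atomic_demo_inc(atomic_demo_t *v) { v->counter++; }

int main(void)
{
	atomic_unchecked_demo_t stats = { 0 };

	/* atomic_demo_inc(&stats); -- incompatible pointer type:
	 * the compiler flags any counter not audited for overflow. */
	stats.counter++;
	return 0;
}
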
15819diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15820index 46e9052..ae45136 100644
15821--- a/arch/x86/include/asm/atomic64_64.h
15822+++ b/arch/x86/include/asm/atomic64_64.h
15823@@ -18,7 +18,19 @@
15824 */
15825 static inline long atomic64_read(const atomic64_t *v)
15826 {
15827- return (*(volatile long *)&(v)->counter);
15828+ return (*(volatile const long *)&(v)->counter);
15829+}
15830+
15831+/**
15832+ * atomic64_read_unchecked - read atomic64 variable
15833+ * @v: pointer of type atomic64_unchecked_t
15834+ *
15835+ * Atomically reads the value of @v.
15836+ * Doesn't imply a read memory barrier.
15837+ */
15838+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15839+{
15840+ return (*(volatile const long *)&(v)->counter);
15841 }
15842
15843 /**
15844@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15845 }
15846
15847 /**
15848+ * atomic64_set_unchecked - set atomic64 variable
15849+ * @v: pointer to type atomic64_unchecked_t
15850+ * @i: required value
15851+ *
15852+ * Atomically sets the value of @v to @i.
15853+ */
15854+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15855+{
15856+ v->counter = i;
15857+}
15858+
15859+/**
15860 * atomic64_add - add integer to atomic64 variable
15861 * @i: integer value to add
15862 * @v: pointer to type atomic64_t
15863@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15864 */
15865 static inline void atomic64_add(long i, atomic64_t *v)
15866 {
15867+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15868+
15869+#ifdef CONFIG_PAX_REFCOUNT
15870+ "jno 0f\n"
15871+ LOCK_PREFIX "subq %1,%0\n"
15872+ "int $4\n0:\n"
15873+ _ASM_EXTABLE(0b, 0b)
15874+#endif
15875+
15876+ : "=m" (v->counter)
15877+ : "er" (i), "m" (v->counter));
15878+}
15879+
15880+/**
15881+ * atomic64_add_unchecked - add integer to atomic64 variable
15882+ * @i: integer value to add
15883+ * @v: pointer to type atomic64_unchecked_t
15884+ *
15885+ * Atomically adds @i to @v.
15886+ */
15887+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15888+{
15889 asm volatile(LOCK_PREFIX "addq %1,%0"
15890 : "=m" (v->counter)
15891 : "er" (i), "m" (v->counter));
15892@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15893 */
15894 static inline void atomic64_sub(long i, atomic64_t *v)
15895 {
15896- asm volatile(LOCK_PREFIX "subq %1,%0"
15897+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15898+
15899+#ifdef CONFIG_PAX_REFCOUNT
15900+ "jno 0f\n"
15901+ LOCK_PREFIX "addq %1,%0\n"
15902+ "int $4\n0:\n"
15903+ _ASM_EXTABLE(0b, 0b)
15904+#endif
15905+
15906+ : "=m" (v->counter)
15907+ : "er" (i), "m" (v->counter));
15908+}
15909+
15910+/**
15911+ * atomic64_sub_unchecked - subtract the atomic64 variable
15912+ * @i: integer value to subtract
15913+ * @v: pointer to type atomic64_unchecked_t
15914+ *
15915+ * Atomically subtracts @i from @v.
15916+ */
15917+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15918+{
15919+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15920 : "=m" (v->counter)
15921 : "er" (i), "m" (v->counter));
15922 }
15923@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15924 */
15925 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15926 {
15927- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15928+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15929 }
15930
15931 /**
15932@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15933 */
15934 static inline void atomic64_inc(atomic64_t *v)
15935 {
15936+ asm volatile(LOCK_PREFIX "incq %0\n"
15937+
15938+#ifdef CONFIG_PAX_REFCOUNT
15939+ "jno 0f\n"
15940+ LOCK_PREFIX "decq %0\n"
15941+ "int $4\n0:\n"
15942+ _ASM_EXTABLE(0b, 0b)
15943+#endif
15944+
15945+ : "=m" (v->counter)
15946+ : "m" (v->counter));
15947+}
15948+
15949+/**
15950+ * atomic64_inc_unchecked - increment atomic64 variable
15951+ * @v: pointer to type atomic64_unchecked_t
15952+ *
15953+ * Atomically increments @v by 1.
15954+ */
15955+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15956+{
15957 asm volatile(LOCK_PREFIX "incq %0"
15958 : "=m" (v->counter)
15959 : "m" (v->counter));
15960@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15961 */
15962 static inline void atomic64_dec(atomic64_t *v)
15963 {
15964- asm volatile(LOCK_PREFIX "decq %0"
15965+ asm volatile(LOCK_PREFIX "decq %0\n"
15966+
15967+#ifdef CONFIG_PAX_REFCOUNT
15968+ "jno 0f\n"
15969+ LOCK_PREFIX "incq %0\n"
15970+ "int $4\n0:\n"
15971+ _ASM_EXTABLE(0b, 0b)
15972+#endif
15973+
15974+ : "=m" (v->counter)
15975+ : "m" (v->counter));
15976+}
15977+
15978+/**
15979+ * atomic64_dec_unchecked - decrement atomic64 variable
15980+ * @v: pointer to type atomic64_unchecked_t
15981+ *
15982+ * Atomically decrements @v by 1.
15983+ */
15984+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15985+{
15986+ asm volatile(LOCK_PREFIX "decq %0\n"
15987 : "=m" (v->counter)
15988 : "m" (v->counter));
15989 }
15990@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15991 */
15992 static inline int atomic64_dec_and_test(atomic64_t *v)
15993 {
15994- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15995+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15996 }
15997
15998 /**
15999@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16000 */
16001 static inline int atomic64_inc_and_test(atomic64_t *v)
16002 {
16003- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16004+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16005 }
16006
16007 /**
16008@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16009 */
16010 static inline int atomic64_add_negative(long i, atomic64_t *v)
16011 {
16012- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16013+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16014 }
16015
16016 /**
16017@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16018 */
16019 static inline long atomic64_add_return(long i, atomic64_t *v)
16020 {
16021+ return i + xadd_check_overflow(&v->counter, i);
16022+}
16023+
16024+/**
16025+ * atomic64_add_return_unchecked - add and return
16026+ * @i: integer value to add
16027+ * @v: pointer to type atomic64_unchecked_t
16028+ *
16029+ * Atomically adds @i to @v and returns @i + @v
16030+ */
16031+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16032+{
16033 return i + xadd(&v->counter, i);
16034 }
16035
16036@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16037 }
16038
16039 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16040+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16041+{
16042+ return atomic64_add_return_unchecked(1, v);
16043+}
16044 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16045
16046 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16047@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16048 return cmpxchg(&v->counter, old, new);
16049 }
16050
16051+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16052+{
16053+ return cmpxchg(&v->counter, old, new);
16054+}
16055+
16056 static inline long atomic64_xchg(atomic64_t *v, long new)
16057 {
16058 return xchg(&v->counter, new);
16059@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16060 */
16061 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16062 {
16063- long c, old;
16064+ long c, old, new;
16065 c = atomic64_read(v);
16066 for (;;) {
16067- if (unlikely(c == (u)))
16068+ if (unlikely(c == u))
16069 break;
16070- old = atomic64_cmpxchg((v), c, c + (a));
16071+
16072+ asm volatile("add %2,%0\n"
16073+
16074+#ifdef CONFIG_PAX_REFCOUNT
16075+ "jno 0f\n"
16076+ "sub %2,%0\n"
16077+ "int $4\n0:\n"
16078+ _ASM_EXTABLE(0b, 0b)
16079+#endif
16080+
16081+ : "=r" (new)
16082+ : "0" (c), "ir" (a));
16083+
16084+ old = atomic64_cmpxchg(v, c, new);
16085 if (likely(old == c))
16086 break;
16087 c = old;
16088 }
16089- return c != (u);
16090+ return c != u;
16091 }
16092
16093 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
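
atomic64_add_unless above keeps the classic load/compute/cmpxchg retry loop, only inserting the overflow check into the "compute" step. The loop shape itself, sketched with GCC's __atomic builtins rather than the kernel API:

#include <stdbool.h>

/* Read c, try to publish c + a; if another CPU moved the counter
 * first, the compare-exchange fails, refreshes c, and we retry.
 * The patch additionally traps if c + a overflows. */
static bool add_unless_demo(long *v, long a, long u)
{
	long c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return false;	/* already at the excluded value */
		if (__atomic_compare_exchange_n(v, &c, c + a, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return true;
		/* on failure c now holds the fresh value; loop */
	}
}

int main(void)
{
	long v = 3;
	return add_unless_demo(&v, 1, 0) && v == 4 ? 0 : 1;
}
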
16094diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16095index 69bbb48..32517fe 100644
16096--- a/arch/x86/include/asm/barrier.h
16097+++ b/arch/x86/include/asm/barrier.h
16098@@ -107,7 +107,7 @@
16099 do { \
16100 compiletime_assert_atomic_type(*p); \
16101 smp_mb(); \
16102- ACCESS_ONCE(*p) = (v); \
16103+ ACCESS_ONCE_RW(*p) = (v); \
16104 } while (0)
16105
16106 #define smp_load_acquire(p) \
16107@@ -124,7 +124,7 @@ do { \
16108 do { \
16109 compiletime_assert_atomic_type(*p); \
16110 barrier(); \
16111- ACCESS_ONCE(*p) = (v); \
16112+ ACCESS_ONCE_RW(*p) = (v); \
16113 } while (0)
16114
16115 #define smp_load_acquire(p) \
16116diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16117index 9fc1af7..fc71228 100644
16118--- a/arch/x86/include/asm/bitops.h
16119+++ b/arch/x86/include/asm/bitops.h
16120@@ -49,7 +49,7 @@
16121 * a mask operation on a byte.
16122 */
16123 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16124-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16125+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16126 #define CONST_MASK(nr) (1 << ((nr) & 7))
16127
16128 /**
16129@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16130 */
16131 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16132 {
16133- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16134+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16135 }
16136
16137 /**
16138@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16139 */
16140 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16141 {
16142- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16143+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16144 }
16145
16146 /**
16147@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16148 */
16149 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16150 {
16151- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16152+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16153 }
16154
16155 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16156@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16157 *
16158 * Undefined if no bit exists, so code should check against 0 first.
16159 */
16160-static inline unsigned long __ffs(unsigned long word)
16161+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16162 {
16163 asm("rep; bsf %1,%0"
16164 : "=r" (word)
16165@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
16166 *
16167 * Undefined if no zero exists, so code should check against ~0UL first.
16168 */
16169-static inline unsigned long ffz(unsigned long word)
16170+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16171 {
16172 asm("rep; bsf %1,%0"
16173 : "=r" (word)
16174@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word)
16175 *
16176 * Undefined if no set bit exists, so code should check against 0 first.
16177 */
16178-static inline unsigned long __fls(unsigned long word)
16179+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16180 {
16181 asm("bsr %1,%0"
16182 : "=r" (word)
16183@@ -436,7 +436,7 @@ static inline int ffs(int x)
16184 * set bit if value is nonzero. The last (most significant) bit is
16185 * at position 32.
16186 */
16187-static inline int fls(int x)
16188+static inline int __intentional_overflow(-1) fls(int x)
16189 {
16190 int r;
16191
16192@@ -478,7 +478,7 @@ static inline int fls(int x)
16193 * at position 64.
16194 */
16195 #ifdef CONFIG_X86_64
16196-static __always_inline int fls64(__u64 x)
16197+static __always_inline long fls64(__u64 x)
16198 {
16199 int bitpos = -1;
16200 /*
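
Most of the bitops.h changes only annotate the bit-scan helpers (__ffs, ffz, __fls, fls) as __intentional_overflow(-1) so the size_overflow plugin leaves their -1/~0 idioms alone. The helpers themselves are thin wrappers over bsf/bsr; GCC exposes the same operation as a builtin, as this small self-checking sketch (demo names) shows:

#include <assert.h>

/* __ffs finds the lowest set bit, as bitops.h does with "rep; bsf";
 * __builtin_ctzl maps to the same bsf/tzcnt instruction on x86.
 * Like __ffs, it is undefined for a zero argument. */
static unsigned long demo_ffs(unsigned long word)
{
	return (unsigned long)__builtin_ctzl(word);
}

int main(void)
{
	assert(demo_ffs(0x40UL) == 6);
	assert(demo_ffs(1UL) == 0);
	return 0;
}
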
16201diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16202index 4fa687a..60f2d39 100644
16203--- a/arch/x86/include/asm/boot.h
16204+++ b/arch/x86/include/asm/boot.h
16205@@ -6,10 +6,15 @@
16206 #include <uapi/asm/boot.h>
16207
16208 /* Physical address where kernel should be loaded. */
16209-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16210+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16211 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16212 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16213
16214+#ifndef __ASSEMBLY__
16215+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16216+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16217+#endif
16218+
16219 /* Minimum kernel alignment, as a power of two */
16220 #ifdef CONFIG_X86_64
16221 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16222diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16223index 48f99f1..d78ebf9 100644
16224--- a/arch/x86/include/asm/cache.h
16225+++ b/arch/x86/include/asm/cache.h
16226@@ -5,12 +5,13 @@
16227
16228 /* L1 cache line size */
16229 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16230-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16231+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16232
16233 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16234+#define __read_only __attribute__((__section__(".data..read_only")))
16235
16236 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16237-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16238+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16239
16240 #ifdef CONFIG_X86_VSMP
16241 #ifdef CONFIG_SMP
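
cache.h introduces __read_only, which files data into a .data..read_only section; elsewhere the patch arranges for that section to be mapped without write permission once boot is done. The attribute half of the mechanism is plain GCC and can be sketched in user space (the protection itself comes from the kernel's linker script and page tables, not shown):

#include <stdio.h>

/* Same mechanism as the new __read_only marker: place the object in
 * a named section. In the patched kernel that section is meant to
 * be mapped read-only after initialization. */
#define __read_only_demo __attribute__((__section__(".data..read_only")))

static int tunable __read_only_demo = 42;

int main(void)
{
	printf("tunable = %d\n", tunable);
	return 0;
}
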
16242diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16243index 9863ee3..4a1f8e1 100644
16244--- a/arch/x86/include/asm/cacheflush.h
16245+++ b/arch/x86/include/asm/cacheflush.h
16246@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16247 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16248
16249 if (pg_flags == _PGMT_DEFAULT)
16250- return -1;
16251+ return ~0UL;
16252 else if (pg_flags == _PGMT_WC)
16253 return _PAGE_CACHE_WC;
16254 else if (pg_flags == _PGMT_UC_MINUS)
16255diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16256index cb4c73b..c473c29 100644
16257--- a/arch/x86/include/asm/calling.h
16258+++ b/arch/x86/include/asm/calling.h
16259@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16260 #define RSP 152
16261 #define SS 160
16262
16263-#define ARGOFFSET R11
16264-#define SWFRAME ORIG_RAX
16265+#define ARGOFFSET R15
16266
16267 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16268- subq $9*8+\addskip, %rsp
16269- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16270- movq_cfi rdi, 8*8
16271- movq_cfi rsi, 7*8
16272- movq_cfi rdx, 6*8
16273+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16274+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16275+ movq_cfi rdi, RDI
16276+ movq_cfi rsi, RSI
16277+ movq_cfi rdx, RDX
16278
16279 .if \save_rcx
16280- movq_cfi rcx, 5*8
16281+ movq_cfi rcx, RCX
16282 .endif
16283
16284- movq_cfi rax, 4*8
16285+ movq_cfi rax, RAX
16286
16287 .if \save_r891011
16288- movq_cfi r8, 3*8
16289- movq_cfi r9, 2*8
16290- movq_cfi r10, 1*8
16291- movq_cfi r11, 0*8
16292+ movq_cfi r8, R8
16293+ movq_cfi r9, R9
16294+ movq_cfi r10, R10
16295+ movq_cfi r11, R11
16296 .endif
16297
16298+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16299+ movq_cfi r12, R12
16300+#endif
16301+
16302 .endm
16303
16304-#define ARG_SKIP (9*8)
16305+#define ARG_SKIP ORIG_RAX
16306
16307 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16308 rstor_r8910=1, rstor_rdx=1
16309+
16310+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16311+ movq_cfi_restore R12, r12
16312+#endif
16313+
16314 .if \rstor_r11
16315- movq_cfi_restore 0*8, r11
16316+ movq_cfi_restore R11, r11
16317 .endif
16318
16319 .if \rstor_r8910
16320- movq_cfi_restore 1*8, r10
16321- movq_cfi_restore 2*8, r9
16322- movq_cfi_restore 3*8, r8
16323+ movq_cfi_restore R10, r10
16324+ movq_cfi_restore R9, r9
16325+ movq_cfi_restore R8, r8
16326 .endif
16327
16328 .if \rstor_rax
16329- movq_cfi_restore 4*8, rax
16330+ movq_cfi_restore RAX, rax
16331 .endif
16332
16333 .if \rstor_rcx
16334- movq_cfi_restore 5*8, rcx
16335+ movq_cfi_restore RCX, rcx
16336 .endif
16337
16338 .if \rstor_rdx
16339- movq_cfi_restore 6*8, rdx
16340+ movq_cfi_restore RDX, rdx
16341 .endif
16342
16343- movq_cfi_restore 7*8, rsi
16344- movq_cfi_restore 8*8, rdi
16345+ movq_cfi_restore RSI, rsi
16346+ movq_cfi_restore RDI, rdi
16347
16348- .if ARG_SKIP+\addskip > 0
16349- addq $ARG_SKIP+\addskip, %rsp
16350- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16351+ .if ORIG_RAX+\addskip > 0
16352+ addq $ORIG_RAX+\addskip, %rsp
16353+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16354 .endif
16355 .endm
16356
16357- .macro LOAD_ARGS offset, skiprax=0
16358- movq \offset(%rsp), %r11
16359- movq \offset+8(%rsp), %r10
16360- movq \offset+16(%rsp), %r9
16361- movq \offset+24(%rsp), %r8
16362- movq \offset+40(%rsp), %rcx
16363- movq \offset+48(%rsp), %rdx
16364- movq \offset+56(%rsp), %rsi
16365- movq \offset+64(%rsp), %rdi
16366+ .macro LOAD_ARGS skiprax=0
16367+ movq R11(%rsp), %r11
16368+ movq R10(%rsp), %r10
16369+ movq R9(%rsp), %r9
16370+ movq R8(%rsp), %r8
16371+ movq RCX(%rsp), %rcx
16372+ movq RDX(%rsp), %rdx
16373+ movq RSI(%rsp), %rsi
16374+ movq RDI(%rsp), %rdi
16375 .if \skiprax
16376 .else
16377- movq \offset+72(%rsp), %rax
16378+ movq RAX(%rsp), %rax
16379 .endif
16380 .endm
16381
16382-#define REST_SKIP (6*8)
16383-
16384 .macro SAVE_REST
16385- subq $REST_SKIP, %rsp
16386- CFI_ADJUST_CFA_OFFSET REST_SKIP
16387- movq_cfi rbx, 5*8
16388- movq_cfi rbp, 4*8
16389- movq_cfi r12, 3*8
16390- movq_cfi r13, 2*8
16391- movq_cfi r14, 1*8
16392- movq_cfi r15, 0*8
16393+ movq_cfi rbx, RBX
16394+ movq_cfi rbp, RBP
16395+
16396+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16397+ movq_cfi r12, R12
16398+#endif
16399+
16400+ movq_cfi r13, R13
16401+ movq_cfi r14, R14
16402+ movq_cfi r15, R15
16403 .endm
16404
16405 .macro RESTORE_REST
16406- movq_cfi_restore 0*8, r15
16407- movq_cfi_restore 1*8, r14
16408- movq_cfi_restore 2*8, r13
16409- movq_cfi_restore 3*8, r12
16410- movq_cfi_restore 4*8, rbp
16411- movq_cfi_restore 5*8, rbx
16412- addq $REST_SKIP, %rsp
16413- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16414+ movq_cfi_restore R15, r15
16415+ movq_cfi_restore R14, r14
16416+ movq_cfi_restore R13, r13
16417+
16418+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16419+ movq_cfi_restore R12, r12
16420+#endif
16421+
16422+ movq_cfi_restore RBP, rbp
16423+ movq_cfi_restore RBX, rbx
16424 .endm
16425
16426 .macro SAVE_ALL
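
The SAVE_ARGS/RESTORE_ARGS rework above swaps hard-coded slot offsets (0*8, 1*8, ...) for the named pt_regs offsets (R11, RCX, ...), so the save area always lines up with struct pt_regs and %r12 can be reserved for the KERNEXEC _OR mask. The readability half of that change in miniature, with a demo struct standing in for the real pt_regs:

#include <stddef.h>
#include <stdio.h>

/* Deriving each slot from a struct layout means reordering the
 * frame cannot silently break matched save/restore pairs, unlike
 * literal "5*8"-style offsets. */
struct frame_demo {
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
};

int main(void)
{
	printf("rcx slot at offset %zu (was hard-coded as 5*8)\n",
	       offsetof(struct frame_demo, rcx));
	return 0;
}
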
16427diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16428index f50de69..2b0a458 100644
16429--- a/arch/x86/include/asm/checksum_32.h
16430+++ b/arch/x86/include/asm/checksum_32.h
16431@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16432 int len, __wsum sum,
16433 int *src_err_ptr, int *dst_err_ptr);
16434
16435+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16436+ int len, __wsum sum,
16437+ int *src_err_ptr, int *dst_err_ptr);
16438+
16439+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16440+ int len, __wsum sum,
16441+ int *src_err_ptr, int *dst_err_ptr);
16442+
16443 /*
16444 * Note: when you get a NULL pointer exception here this means someone
16445 * passed in an incorrect kernel address to one of these functions.
16446@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16447
16448 might_sleep();
16449 stac();
16450- ret = csum_partial_copy_generic((__force void *)src, dst,
16451+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16452 len, sum, err_ptr, NULL);
16453 clac();
16454
16455@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16456 might_sleep();
16457 if (access_ok(VERIFY_WRITE, dst, len)) {
16458 stac();
16459- ret = csum_partial_copy_generic(src, (__force void *)dst,
16460+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16461 len, sum, NULL, err_ptr);
16462 clac();
16463 return ret;
16464diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16465index d47786a..2d8883e 100644
16466--- a/arch/x86/include/asm/cmpxchg.h
16467+++ b/arch/x86/include/asm/cmpxchg.h
16468@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16469 __compiletime_error("Bad argument size for cmpxchg");
16470 extern void __xadd_wrong_size(void)
16471 __compiletime_error("Bad argument size for xadd");
16472+extern void __xadd_check_overflow_wrong_size(void)
16473+ __compiletime_error("Bad argument size for xadd_check_overflow");
16474 extern void __add_wrong_size(void)
16475 __compiletime_error("Bad argument size for add");
16476+extern void __add_check_overflow_wrong_size(void)
16477+ __compiletime_error("Bad argument size for add_check_overflow");
16478
16479 /*
16480 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16481@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
16482 __ret; \
16483 })
16484
16485+#ifdef CONFIG_PAX_REFCOUNT
16486+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16487+ ({ \
16488+ __typeof__ (*(ptr)) __ret = (arg); \
16489+ switch (sizeof(*(ptr))) { \
16490+ case __X86_CASE_L: \
16491+ asm volatile (lock #op "l %0, %1\n" \
16492+ "jno 0f\n" \
16493+ "mov %0,%1\n" \
16494+ "int $4\n0:\n" \
16495+ _ASM_EXTABLE(0b, 0b) \
16496+ : "+r" (__ret), "+m" (*(ptr)) \
16497+ : : "memory", "cc"); \
16498+ break; \
16499+ case __X86_CASE_Q: \
16500+ asm volatile (lock #op "q %q0, %1\n" \
16501+ "jno 0f\n" \
16502+ "mov %0,%1\n" \
16503+ "int $4\n0:\n" \
16504+ _ASM_EXTABLE(0b, 0b) \
16505+ : "+r" (__ret), "+m" (*(ptr)) \
16506+ : : "memory", "cc"); \
16507+ break; \
16508+ default: \
16509+ __ ## op ## _check_overflow_wrong_size(); \
16510+ } \
16511+ __ret; \
16512+ })
16513+#else
16514+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16515+#endif
16516+
16517 /*
16518 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16519 * Since this is generally used to protect other memory information, we
16520@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16521 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16522 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16523
16524+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16525+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16526+
16527 #define __add(ptr, inc, lock) \
16528 ({ \
16529 __typeof__ (*(ptr)) __ret = (inc); \
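
xadd_check_overflow deliberately keeps xadd's contract of returning the pre-add value, which is why atomic_add_return earlier in the patch reads "return i + xadd_check_overflow(&v->counter, i)". That identity, restated with a GCC builtin instead of the kernel macro:

#include <assert.h>

/* xadd returns the value *before* the addition, so
 * add_return(v, i) == i + fetch_add(v, i). */
int main(void)
{
	long v = 10;
	long old = __atomic_fetch_add(&v, 5, __ATOMIC_SEQ_CST);

	assert(old == 10 && v == 15);
	assert(5 + old == v);	/* the add_return identity */
	return 0;
}
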
16530diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16531index 59c6c40..5e0b22c 100644
16532--- a/arch/x86/include/asm/compat.h
16533+++ b/arch/x86/include/asm/compat.h
16534@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16535 typedef u32 compat_uint_t;
16536 typedef u32 compat_ulong_t;
16537 typedef u64 __attribute__((aligned(4))) compat_u64;
16538-typedef u32 compat_uptr_t;
16539+typedef u32 __user compat_uptr_t;
16540
16541 struct compat_timespec {
16542 compat_time_t tv_sec;
16543diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16544index 5f12968..a383517 100644
16545--- a/arch/x86/include/asm/cpufeature.h
16546+++ b/arch/x86/include/asm/cpufeature.h
16547@@ -203,7 +203,7 @@
16548 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16549 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16550 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16551-
16552+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16553
16554 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16555 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16556@@ -211,7 +211,7 @@
16557 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16558 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16559 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16560-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16561+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16562 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16563 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16564 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16565@@ -358,6 +358,7 @@ extern const char * const x86_power_flags[32];
16566 #undef cpu_has_centaur_mcr
16567 #define cpu_has_centaur_mcr 0
16568
16569+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16570 #endif /* CONFIG_X86_64 */
16571
16572 #if __GNUC__ >= 4
16573@@ -410,7 +411,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16574
16575 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16576 t_warn:
16577- warn_pre_alternatives();
16578+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16579+ warn_pre_alternatives();
16580 return false;
16581 #endif
16582
16583@@ -430,7 +432,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16584 ".section .discard,\"aw\",@progbits\n"
16585 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16586 ".previous\n"
16587- ".section .altinstr_replacement,\"ax\"\n"
16588+ ".section .altinstr_replacement,\"a\"\n"
16589 "3: movb $1,%0\n"
16590 "4:\n"
16591 ".previous\n"
16592@@ -467,7 +469,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16593 " .byte 2b - 1b\n" /* src len */
16594 " .byte 4f - 3f\n" /* repl len */
16595 ".previous\n"
16596- ".section .altinstr_replacement,\"ax\"\n"
16597+ ".section .altinstr_replacement,\"a\"\n"
16598 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16599 "4:\n"
16600 ".previous\n"
16601@@ -500,7 +502,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16602 ".section .discard,\"aw\",@progbits\n"
16603 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16604 ".previous\n"
16605- ".section .altinstr_replacement,\"ax\"\n"
16606+ ".section .altinstr_replacement,\"a\"\n"
16607 "3: movb $0,%0\n"
16608 "4:\n"
16609 ".previous\n"
16610@@ -514,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16611 ".section .discard,\"aw\",@progbits\n"
16612 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16613 ".previous\n"
16614- ".section .altinstr_replacement,\"ax\"\n"
16615+ ".section .altinstr_replacement,\"a\"\n"
16616 "5: movb $1,%0\n"
16617 "6:\n"
16618 ".previous\n"
16619diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16620index 50d033a..37deb26 100644
16621--- a/arch/x86/include/asm/desc.h
16622+++ b/arch/x86/include/asm/desc.h
16623@@ -4,6 +4,7 @@
16624 #include <asm/desc_defs.h>
16625 #include <asm/ldt.h>
16626 #include <asm/mmu.h>
16627+#include <asm/pgtable.h>
16628
16629 #include <linux/smp.h>
16630 #include <linux/percpu.h>
16631@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16632
16633 desc->type = (info->read_exec_only ^ 1) << 1;
16634 desc->type |= info->contents << 2;
16635+ desc->type |= info->seg_not_present ^ 1;
16636
16637 desc->s = 1;
16638 desc->dpl = 0x3;
16639@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16640 }
16641
16642 extern struct desc_ptr idt_descr;
16643-extern gate_desc idt_table[];
16644-extern struct desc_ptr debug_idt_descr;
16645-extern gate_desc debug_idt_table[];
16646-
16647-struct gdt_page {
16648- struct desc_struct gdt[GDT_ENTRIES];
16649-} __attribute__((aligned(PAGE_SIZE)));
16650-
16651-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16652+extern gate_desc idt_table[IDT_ENTRIES];
16653+extern const struct desc_ptr debug_idt_descr;
16654+extern gate_desc debug_idt_table[IDT_ENTRIES];
16655
16656+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16657 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16658 {
16659- return per_cpu(gdt_page, cpu).gdt;
16660+ return cpu_gdt_table[cpu];
16661 }
16662
16663 #ifdef CONFIG_X86_64
16664@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16665 unsigned long base, unsigned dpl, unsigned flags,
16666 unsigned short seg)
16667 {
16668- gate->a = (seg << 16) | (base & 0xffff);
16669- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16670+ gate->gate.offset_low = base;
16671+ gate->gate.seg = seg;
16672+ gate->gate.reserved = 0;
16673+ gate->gate.type = type;
16674+ gate->gate.s = 0;
16675+ gate->gate.dpl = dpl;
16676+ gate->gate.p = 1;
16677+ gate->gate.offset_high = base >> 16;
16678 }
16679
16680 #endif
16681@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16682
16683 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16684 {
16685+ pax_open_kernel();
16686 memcpy(&idt[entry], gate, sizeof(*gate));
16687+ pax_close_kernel();
16688 }
16689
16690 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16691 {
16692+ pax_open_kernel();
16693 memcpy(&ldt[entry], desc, 8);
16694+ pax_close_kernel();
16695 }
16696
16697 static inline void
16698@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16699 default: size = sizeof(*gdt); break;
16700 }
16701
16702+ pax_open_kernel();
16703 memcpy(&gdt[entry], desc, size);
16704+ pax_close_kernel();
16705 }
16706
16707 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16708@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16709
16710 static inline void native_load_tr_desc(void)
16711 {
16712+ pax_open_kernel();
16713 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16714+ pax_close_kernel();
16715 }
16716
16717 static inline void native_load_gdt(const struct desc_ptr *dtr)
16718@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16719 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16720 unsigned int i;
16721
16722+ pax_open_kernel();
16723 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16724 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16725+ pax_close_kernel();
16726 }
16727
16728 #define _LDT_empty(info) \
16729@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16730 preempt_enable();
16731 }
16732
16733-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16734+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16735 {
16736 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16737 }
16738@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16739 }
16740
16741 #ifdef CONFIG_X86_64
16742-static inline void set_nmi_gate(int gate, void *addr)
16743+static inline void set_nmi_gate(int gate, const void *addr)
16744 {
16745 gate_desc s;
16746
16747@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16748 #endif
16749
16750 #ifdef CONFIG_TRACING
16751-extern struct desc_ptr trace_idt_descr;
16752-extern gate_desc trace_idt_table[];
16753+extern const struct desc_ptr trace_idt_descr;
16754+extern gate_desc trace_idt_table[IDT_ENTRIES];
16755 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16756 {
16757 write_idt_entry(trace_idt_table, entry, gate);
16758 }
16759
16760-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16761+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16762 unsigned dpl, unsigned ist, unsigned seg)
16763 {
16764 gate_desc s;
16765@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16766 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16767 #endif
16768
16769-static inline void _set_gate(int gate, unsigned type, void *addr,
16770+static inline void _set_gate(int gate, unsigned type, const void *addr,
16771 unsigned dpl, unsigned ist, unsigned seg)
16772 {
16773 gate_desc s;
16774@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16775 #define set_intr_gate(n, addr) \
16776 do { \
16777 BUG_ON((unsigned)n > 0xFF); \
16778- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16779+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16780 __KERNEL_CS); \
16781- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16782+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16783 0, 0, __KERNEL_CS); \
16784 } while (0)
16785
16786@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16787 /*
16788 * This routine sets up an interrupt gate at directory privilege level 3.
16789 */
16790-static inline void set_system_intr_gate(unsigned int n, void *addr)
16791+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16792 {
16793 BUG_ON((unsigned)n > 0xFF);
16794 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16795 }
16796
16797-static inline void set_system_trap_gate(unsigned int n, void *addr)
16798+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16799 {
16800 BUG_ON((unsigned)n > 0xFF);
16801 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16802 }
16803
16804-static inline void set_trap_gate(unsigned int n, void *addr)
16805+static inline void set_trap_gate(unsigned int n, const void *addr)
16806 {
16807 BUG_ON((unsigned)n > 0xFF);
16808 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16809@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16810 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16811 {
16812 BUG_ON((unsigned)n > 0xFF);
16813- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16814+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16815 }
16816
16817-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16818+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16819 {
16820 BUG_ON((unsigned)n > 0xFF);
16821 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16822 }
16823
16824-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16825+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16826 {
16827 BUG_ON((unsigned)n > 0xFF);
16828 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16829@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16830 else
16831 load_idt((const struct desc_ptr *)&idt_descr);
16832 }
16833+
16834+#ifdef CONFIG_X86_32
16835+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16836+{
16837+ struct desc_struct d;
16838+
16839+ if (likely(limit))
16840+ limit = (limit - 1UL) >> PAGE_SHIFT;
16841+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16842+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16843+}
16844+#endif
16845+
16846 #endif /* _ASM_X86_DESC_H */
16847diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16848index 278441f..b95a174 100644
16849--- a/arch/x86/include/asm/desc_defs.h
16850+++ b/arch/x86/include/asm/desc_defs.h
16851@@ -31,6 +31,12 @@ struct desc_struct {
16852 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16853 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16854 };
16855+ struct {
16856+ u16 offset_low;
16857+ u16 seg;
16858+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16859+ unsigned offset_high: 16;
16860+ } gate;
16861 };
16862 } __attribute__((packed));
16863
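
The new "gate" member of the desc_struct union lets pack_gate (in the desc.h hunk above) assign named fields instead of hand-shifting two 32-bit words. A self-checking sketch that the two encodings agree, assuming GCC's little-endian x86 bitfield layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of the gate view added to desc_defs.h (32-bit layout). */
struct gate_demo {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
	unsigned offset_high: 16;
} __attribute__((packed));

int main(void)
{
	uint32_t base = 0x12345678, seg = 0x10, type = 0xE, dpl = 0;
	struct gate_demo g = { 0 };
	uint32_t a, b, w[2];

	g.offset_low  = (uint16_t)base;
	g.seg         = (uint16_t)seg;
	g.type        = type;
	g.dpl         = dpl;
	g.p           = 1;
	g.offset_high = base >> 16;

	/* the manual packing that pack_gate() used before the patch */
	a = (seg << 16) | (base & 0xffff);
	b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	memcpy(w, &g, sizeof(w));
	assert(w[0] == a && w[1] == b);
	puts("bitfield view matches the shifted encoding");
	return 0;
}
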
16864diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16865index ced283a..ffe04cc 100644
16866--- a/arch/x86/include/asm/div64.h
16867+++ b/arch/x86/include/asm/div64.h
16868@@ -39,7 +39,7 @@
16869 __mod; \
16870 })
16871
16872-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16873+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16874 {
16875 union {
16876 u64 v64;
16877diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16878index 9c999c1..3860cb8 100644
16879--- a/arch/x86/include/asm/elf.h
16880+++ b/arch/x86/include/asm/elf.h
16881@@ -243,7 +243,25 @@ extern int force_personality32;
16882 the loader. We need to make sure that it is out of the way of the program
16883 that it will "exec", and that there is sufficient room for the brk. */
16884
16885+#ifdef CONFIG_PAX_SEGMEXEC
16886+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16887+#else
16888 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16889+#endif
16890+
16891+#ifdef CONFIG_PAX_ASLR
16892+#ifdef CONFIG_X86_32
16893+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16894+
16895+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16896+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16897+#else
16898+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16899+
16900+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16901+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16902+#endif
16903+#endif
16904
16905 /* This yields a mask that user programs can use to figure out what
16906 instruction set this CPU supports. This could be done in user space,
16907@@ -296,16 +314,12 @@ do { \
16908
16909 #define ARCH_DLINFO \
16910 do { \
16911- if (vdso_enabled) \
16912- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16913- (unsigned long)current->mm->context.vdso); \
16914+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16915 } while (0)
16916
16917 #define ARCH_DLINFO_X32 \
16918 do { \
16919- if (vdso_enabled) \
16920- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16921- (unsigned long)current->mm->context.vdso); \
16922+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16923 } while (0)
16924
16925 #define AT_SYSINFO 32
16926@@ -320,7 +334,7 @@ else \
16927
16928 #endif /* !CONFIG_X86_32 */
16929
16930-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16931+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16932
16933 #define VDSO_ENTRY \
16934 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
16935@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
16936 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
16937 #define compat_arch_setup_additional_pages syscall32_setup_pages
16938
16939-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16940-#define arch_randomize_brk arch_randomize_brk
16941-
16942 /*
16943 * True on X86_32 or when emulating IA32 on X86_64
16944 */
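
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above set how many random bits PaX ASLR mixes into the mmap and stack bases: 16 on i386 (15 under SEGMEXEC, where the usable address space is halved), and a value derived from TASK_SIZE_MAX_SHIFT for native 64-bit tasks. How such a bit count turns into a page-aligned displacement, as a self-contained sketch (rnd stands in for the kernel's entropy source):

	#include <stdint.h>

	static uint64_t aslr_offset(uint64_t rnd, unsigned delta_bits, unsigned page_shift)
	{
		/* keep delta_bits of entropy, then shift it to a page boundary */
		return (rnd & ((1ULL << delta_bits) - 1)) << page_shift;
	}
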
16945diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16946index 77a99ac..39ff7f5 100644
16947--- a/arch/x86/include/asm/emergency-restart.h
16948+++ b/arch/x86/include/asm/emergency-restart.h
16949@@ -1,6 +1,6 @@
16950 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16951 #define _ASM_X86_EMERGENCY_RESTART_H
16952
16953-extern void machine_emergency_restart(void);
16954+extern void machine_emergency_restart(void) __noreturn;
16955
16956 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16957diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16958index d3d7469..677ef72 100644
16959--- a/arch/x86/include/asm/floppy.h
16960+++ b/arch/x86/include/asm/floppy.h
16961@@ -229,18 +229,18 @@ static struct fd_routine_l {
16962 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16963 } fd_routine[] = {
16964 {
16965- request_dma,
16966- free_dma,
16967- get_dma_residue,
16968- dma_mem_alloc,
16969- hard_dma_setup
16970+ ._request_dma = request_dma,
16971+ ._free_dma = free_dma,
16972+ ._get_dma_residue = get_dma_residue,
16973+ ._dma_mem_alloc = dma_mem_alloc,
16974+ ._dma_setup = hard_dma_setup
16975 },
16976 {
16977- vdma_request_dma,
16978- vdma_nop,
16979- vdma_get_dma_residue,
16980- vdma_mem_alloc,
16981- vdma_dma_setup
16982+ ._request_dma = vdma_request_dma,
16983+ ._free_dma = vdma_nop,
16984+ ._get_dma_residue = vdma_get_dma_residue,
16985+ ._dma_mem_alloc = vdma_mem_alloc,
16986+ ._dma_setup = vdma_dma_setup
16987 }
16988 };
16989
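
The floppy hunk changes no values; it only converts positional initializers to designated ones. The designated form stays correct even if the structure's fields are reordered, which is the property the structure-layout and constification plugins in this patch rely on:

	struct ops { int (*open)(void); int (*close)(void); };
	static int nop(void) { return 0; }

	static struct ops a = { nop, nop };			/* positional: order-dependent */
	static struct ops b = { .open = nop, .close = nop };	/* designated: order-independent */
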
16990diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16991index cea1c76..6c0d79b 100644
16992--- a/arch/x86/include/asm/fpu-internal.h
16993+++ b/arch/x86/include/asm/fpu-internal.h
16994@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16995 #define user_insn(insn, output, input...) \
16996 ({ \
16997 int err; \
16998+ pax_open_userland(); \
16999 asm volatile(ASM_STAC "\n" \
17000- "1:" #insn "\n\t" \
17001+ "1:" \
17002+ __copyuser_seg \
17003+ #insn "\n\t" \
17004 "2: " ASM_CLAC "\n" \
17005 ".section .fixup,\"ax\"\n" \
17006 "3: movl $-1,%[err]\n" \
17007@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17008 _ASM_EXTABLE(1b, 3b) \
17009 : [err] "=r" (err), output \
17010 : "0"(0), input); \
17011+ pax_close_userland(); \
17012 err; \
17013 })
17014
17015@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17016 "fnclex\n\t"
17017 "emms\n\t"
17018 "fildl %P[addr]" /* set F?P to defined value */
17019- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17020+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17021 }
17022
17023 return fpu_restore_checking(&tsk->thread.fpu);
17024diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17025index b4c1f54..e290c08 100644
17026--- a/arch/x86/include/asm/futex.h
17027+++ b/arch/x86/include/asm/futex.h
17028@@ -12,6 +12,7 @@
17029 #include <asm/smap.h>
17030
17031 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17032+ typecheck(u32 __user *, uaddr); \
17033 asm volatile("\t" ASM_STAC "\n" \
17034 "1:\t" insn "\n" \
17035 "2:\t" ASM_CLAC "\n" \
17036@@ -20,15 +21,16 @@
17037 "\tjmp\t2b\n" \
17038 "\t.previous\n" \
17039 _ASM_EXTABLE(1b, 3b) \
17040- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17041+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17042 : "i" (-EFAULT), "0" (oparg), "1" (0))
17043
17044 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17045+ typecheck(u32 __user *, uaddr); \
17046 asm volatile("\t" ASM_STAC "\n" \
17047 "1:\tmovl %2, %0\n" \
17048 "\tmovl\t%0, %3\n" \
17049 "\t" insn "\n" \
17050- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17051+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17052 "\tjnz\t1b\n" \
17053 "3:\t" ASM_CLAC "\n" \
17054 "\t.section .fixup,\"ax\"\n" \
17055@@ -38,7 +40,7 @@
17056 _ASM_EXTABLE(1b, 4b) \
17057 _ASM_EXTABLE(2b, 4b) \
17058 : "=&a" (oldval), "=&r" (ret), \
17059- "+m" (*uaddr), "=&r" (tem) \
17060+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17061 : "r" (oparg), "i" (-EFAULT), "1" (0))
17062
17063 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17064@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17065
17066 pagefault_disable();
17067
17068+ pax_open_userland();
17069 switch (op) {
17070 case FUTEX_OP_SET:
17071- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17072+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17073 break;
17074 case FUTEX_OP_ADD:
17075- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17076+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17077 uaddr, oparg);
17078 break;
17079 case FUTEX_OP_OR:
17080@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17081 default:
17082 ret = -ENOSYS;
17083 }
17084+ pax_close_userland();
17085
17086 pagefault_enable();
17087
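
Both futex hunks follow the i386 UDEREF access pattern: userland is kept in a separate segment, so every user-memory instruction gets a __copyuser_seg override prefix and the whole sequence is bracketed by pax_open_userland()/pax_close_userland(), which make that segment temporarily addressable. A sketch of the macro side under that assumption (the real definitions are elsewhere in this patch):

	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
	#define __copyuser_seg "gs;"	/* operand becomes %gs-relative */
	#else
	#define __copyuser_seg		/* no-op without UDEREF */
	#endif
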
17088diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17089index 67d69b8..50e4b77 100644
17090--- a/arch/x86/include/asm/hw_irq.h
17091+++ b/arch/x86/include/asm/hw_irq.h
17092@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
17093 extern void enable_IO_APIC(void);
17094
17095 /* Statistics */
17096-extern atomic_t irq_err_count;
17097-extern atomic_t irq_mis_count;
17098+extern atomic_unchecked_t irq_err_count;
17099+extern atomic_unchecked_t irq_mis_count;
17100
17101 /* EISA */
17102 extern void eisa_set_level_irq(unsigned int irq);
17103diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17104index a203659..9889f1c 100644
17105--- a/arch/x86/include/asm/i8259.h
17106+++ b/arch/x86/include/asm/i8259.h
17107@@ -62,7 +62,7 @@ struct legacy_pic {
17108 void (*init)(int auto_eoi);
17109 int (*irq_pending)(unsigned int irq);
17110 void (*make_irq)(unsigned int irq);
17111-};
17112+} __do_const;
17113
17114 extern struct legacy_pic *legacy_pic;
17115 extern struct legacy_pic null_legacy_pic;
17116diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17117index 91d9c69..dfae7d0 100644
17118--- a/arch/x86/include/asm/io.h
17119+++ b/arch/x86/include/asm/io.h
17120@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17121 "m" (*(volatile type __force *)addr) barrier); }
17122
17123 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17124-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17125-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17126+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17127+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17128
17129 build_mmio_read(__readb, "b", unsigned char, "=q", )
17130-build_mmio_read(__readw, "w", unsigned short, "=r", )
17131-build_mmio_read(__readl, "l", unsigned int, "=r", )
17132+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17133+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17134
17135 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17136 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17137@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17138 return ioremap_nocache(offset, size);
17139 }
17140
17141-extern void iounmap(volatile void __iomem *addr);
17142+extern void iounmap(const volatile void __iomem *addr);
17143
17144 extern void set_iounmap_nonlazy(void);
17145
17146@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
17147
17148 #include <linux/vmalloc.h>
17149
17150+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17151+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17152+{
17153+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17154+}
17155+
17156+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17157+{
17158+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17159+}
17160+
17161 /*
17162 * Convert a virtual cached pointer to an uncached pointer
17163 */
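
The two helpers added to io.h bound /dev/mem-style accesses by what the CPU can physically address: the last page touched must fall below 2^x86_phys_bits. The same predicate in isolation:

	/* e.g. phys_bits = 36, page_shift = 12: pfns 0 .. 2^24 - 1 are usable */
	static int pfn_range_ok(unsigned long long pfn, unsigned long long npages,
				unsigned phys_bits, unsigned page_shift)
	{
		return pfn + npages < (1ULL << (phys_bits - page_shift));
	}
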
17164diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17165index bba3cf8..06bc8da 100644
17166--- a/arch/x86/include/asm/irqflags.h
17167+++ b/arch/x86/include/asm/irqflags.h
17168@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17169 sti; \
17170 sysexit
17171
17172+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17173+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17174+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17175+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17176+
17177 #else
17178 #define INTERRUPT_RETURN iret
17179 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17180diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17181index 9454c16..e4100e3 100644
17182--- a/arch/x86/include/asm/kprobes.h
17183+++ b/arch/x86/include/asm/kprobes.h
17184@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17185 #define RELATIVEJUMP_SIZE 5
17186 #define RELATIVECALL_OPCODE 0xe8
17187 #define RELATIVE_ADDR_SIZE 4
17188-#define MAX_STACK_SIZE 64
17189-#define MIN_STACK_SIZE(ADDR) \
17190- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17191- THREAD_SIZE - (unsigned long)(ADDR))) \
17192- ? (MAX_STACK_SIZE) \
17193- : (((unsigned long)current_thread_info()) + \
17194- THREAD_SIZE - (unsigned long)(ADDR)))
17195+#define MAX_STACK_SIZE 64UL
17196+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17197
17198 #define flush_insn_slot(p) do { } while (0)
17199
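
The MIN_STACK_SIZE() rewrite replaces the nested ternary with min() and measures the remaining room against thread.sp0, the top of the kernel stack, rather than against current_thread_info() + THREAD_SIZE, consistent with configurations in this patch that move thread_info off the stack. The same logic in plain C:

	/* copy at most 64 bytes, but never read past the top of the stack */
	static unsigned long min_stack_size(unsigned long addr, unsigned long stack_top)
	{
		unsigned long room = stack_top - addr;
		return room < 64UL ? room : 64UL;
	}
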
17200diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17201index 4ad6560..75c7bdd 100644
17202--- a/arch/x86/include/asm/local.h
17203+++ b/arch/x86/include/asm/local.h
17204@@ -10,33 +10,97 @@ typedef struct {
17205 atomic_long_t a;
17206 } local_t;
17207
17208+typedef struct {
17209+ atomic_long_unchecked_t a;
17210+} local_unchecked_t;
17211+
17212 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17213
17214 #define local_read(l) atomic_long_read(&(l)->a)
17215+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17216 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17217+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17218
17219 static inline void local_inc(local_t *l)
17220 {
17221- asm volatile(_ASM_INC "%0"
17222+ asm volatile(_ASM_INC "%0\n"
17223+
17224+#ifdef CONFIG_PAX_REFCOUNT
17225+ "jno 0f\n"
17226+ _ASM_DEC "%0\n"
17227+ "int $4\n0:\n"
17228+ _ASM_EXTABLE(0b, 0b)
17229+#endif
17230+
17231+ : "+m" (l->a.counter));
17232+}
17233+
17234+static inline void local_inc_unchecked(local_unchecked_t *l)
17235+{
17236+ asm volatile(_ASM_INC "%0\n"
17237 : "+m" (l->a.counter));
17238 }
17239
17240 static inline void local_dec(local_t *l)
17241 {
17242- asm volatile(_ASM_DEC "%0"
17243+ asm volatile(_ASM_DEC "%0\n"
17244+
17245+#ifdef CONFIG_PAX_REFCOUNT
17246+ "jno 0f\n"
17247+ _ASM_INC "%0\n"
17248+ "int $4\n0:\n"
17249+ _ASM_EXTABLE(0b, 0b)
17250+#endif
17251+
17252+ : "+m" (l->a.counter));
17253+}
17254+
17255+static inline void local_dec_unchecked(local_unchecked_t *l)
17256+{
17257+ asm volatile(_ASM_DEC "%0\n"
17258 : "+m" (l->a.counter));
17259 }
17260
17261 static inline void local_add(long i, local_t *l)
17262 {
17263- asm volatile(_ASM_ADD "%1,%0"
17264+ asm volatile(_ASM_ADD "%1,%0\n"
17265+
17266+#ifdef CONFIG_PAX_REFCOUNT
17267+ "jno 0f\n"
17268+ _ASM_SUB "%1,%0\n"
17269+ "int $4\n0:\n"
17270+ _ASM_EXTABLE(0b, 0b)
17271+#endif
17272+
17273+ : "+m" (l->a.counter)
17274+ : "ir" (i));
17275+}
17276+
17277+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17278+{
17279+ asm volatile(_ASM_ADD "%1,%0\n"
17280 : "+m" (l->a.counter)
17281 : "ir" (i));
17282 }
17283
17284 static inline void local_sub(long i, local_t *l)
17285 {
17286- asm volatile(_ASM_SUB "%1,%0"
17287+ asm volatile(_ASM_SUB "%1,%0\n"
17288+
17289+#ifdef CONFIG_PAX_REFCOUNT
17290+ "jno 0f\n"
17291+ _ASM_ADD "%1,%0\n"
17292+ "int $4\n0:\n"
17293+ _ASM_EXTABLE(0b, 0b)
17294+#endif
17295+
17296+ : "+m" (l->a.counter)
17297+ : "ir" (i));
17298+}
17299+
17300+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17301+{
17302+ asm volatile(_ASM_SUB "%1,%0\n"
17303 : "+m" (l->a.counter)
17304 : "ir" (i));
17305 }
17306@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17307 */
17308 static inline int local_sub_and_test(long i, local_t *l)
17309 {
17310- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17311+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17312 }
17313
17314 /**
17315@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17316 */
17317 static inline int local_dec_and_test(local_t *l)
17318 {
17319- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17320+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17321 }
17322
17323 /**
17324@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17325 */
17326 static inline int local_inc_and_test(local_t *l)
17327 {
17328- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17329+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17330 }
17331
17332 /**
17333@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17334 */
17335 static inline int local_add_negative(long i, local_t *l)
17336 {
17337- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17338+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17339 }
17340
17341 /**
17342@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17343 static inline long local_add_return(long i, local_t *l)
17344 {
17345 long __i = i;
17346+ asm volatile(_ASM_XADD "%0, %1\n"
17347+
17348+#ifdef CONFIG_PAX_REFCOUNT
17349+ "jno 0f\n"
17350+ _ASM_MOV "%0,%1\n"
17351+ "int $4\n0:\n"
17352+ _ASM_EXTABLE(0b, 0b)
17353+#endif
17354+
17355+ : "+r" (i), "+m" (l->a.counter)
17356+ : : "memory");
17357+ return i + __i;
17358+}
17359+
17360+/**
17361+ * local_add_return_unchecked - add and return
17362+ * @i: integer value to add
17363+ * @l: pointer to type local_unchecked_t
17364+ *
17365+ * Atomically adds @i to @l and returns @i + @l
17366+ */
17367+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17368+{
17369+ long __i = i;
17370 asm volatile(_ASM_XADD "%0, %1;"
17371 : "+r" (i), "+m" (l->a.counter)
17372 : : "memory");
17373@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17374
17375 #define local_cmpxchg(l, o, n) \
17376 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17377+#define local_cmpxchg_unchecked(l, o, n) \
17378+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17379 /* Always has a lock prefix */
17380 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17381
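
Everything in the local.h hunk is the PAX_REFCOUNT pattern: each arithmetic instruction is followed by a jno that skips a recovery path; on signed overflow the operation is undone and int $4 raises #OF, which the _ASM_EXTABLE fixup turns into a refcount-overflow report. The *_unchecked variants opt out for counters that may legitimately wrap. The pattern reduced to a freestanding sketch (here the trap is simply left unhandled rather than recovered through an exception table):

	static inline void refcount_inc_sketch(int *c)
	{
		asm volatile("incl %0\n\t"
			     "jno 0f\n\t"	/* no signed overflow: done */
			     "decl %0\n\t"	/* overflowed: undo the increment */
			     "int $4\n"		/* ...and raise #OF */
			     "0:"
			     : "+m" (*c));
	}
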
17382diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17383new file mode 100644
17384index 0000000..2bfd3ba
17385--- /dev/null
17386+++ b/arch/x86/include/asm/mman.h
17387@@ -0,0 +1,15 @@
17388+#ifndef _X86_MMAN_H
17389+#define _X86_MMAN_H
17390+
17391+#include <uapi/asm/mman.h>
17392+
17393+#ifdef __KERNEL__
17394+#ifndef __ASSEMBLY__
17395+#ifdef CONFIG_X86_32
17396+#define arch_mmap_check i386_mmap_check
17397+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17398+#endif
17399+#endif
17400+#endif
17401+
17402+#endif /* X86_MMAN_H */
17403diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17404index 5f55e69..e20bfb1 100644
17405--- a/arch/x86/include/asm/mmu.h
17406+++ b/arch/x86/include/asm/mmu.h
17407@@ -9,7 +9,7 @@
17408 * we put the segment information here.
17409 */
17410 typedef struct {
17411- void *ldt;
17412+ struct desc_struct *ldt;
17413 int size;
17414
17415 #ifdef CONFIG_X86_64
17416@@ -18,7 +18,19 @@ typedef struct {
17417 #endif
17418
17419 struct mutex lock;
17420- void *vdso;
17421+ unsigned long vdso;
17422+
17423+#ifdef CONFIG_X86_32
17424+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17425+ unsigned long user_cs_base;
17426+ unsigned long user_cs_limit;
17427+
17428+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17429+ cpumask_t cpu_user_cs_mask;
17430+#endif
17431+
17432+#endif
17433+#endif
17434 } mm_context_t;
17435
17436 #ifdef CONFIG_SMP
17437diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17438index be12c53..4d24039 100644
17439--- a/arch/x86/include/asm/mmu_context.h
17440+++ b/arch/x86/include/asm/mmu_context.h
17441@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17442
17443 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17444 {
17445+
17446+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17447+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17448+ unsigned int i;
17449+ pgd_t *pgd;
17450+
17451+ pax_open_kernel();
17452+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17453+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17454+ set_pgd_batched(pgd+i, native_make_pgd(0));
17455+ pax_close_kernel();
17456+ }
17457+#endif
17458+
17459 #ifdef CONFIG_SMP
17460 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17461 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17462@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17463 struct task_struct *tsk)
17464 {
17465 unsigned cpu = smp_processor_id();
17466+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17467+ int tlbstate = TLBSTATE_OK;
17468+#endif
17469
17470 if (likely(prev != next)) {
17471 #ifdef CONFIG_SMP
17472+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17473+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17474+#endif
17475 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17476 this_cpu_write(cpu_tlbstate.active_mm, next);
17477 #endif
17478 cpumask_set_cpu(cpu, mm_cpumask(next));
17479
17480 /* Re-load page tables */
17481+#ifdef CONFIG_PAX_PER_CPU_PGD
17482+ pax_open_kernel();
17483+
17484+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17485+ if (static_cpu_has(X86_FEATURE_PCID))
17486+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17487+ else
17488+#endif
17489+
17490+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17491+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17492+ pax_close_kernel();
17493+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17494+
17495+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17496+ if (static_cpu_has(X86_FEATURE_PCID)) {
17497+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17498+ u64 descriptor[2];
17499+ descriptor[0] = PCID_USER;
17500+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17501+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17502+ descriptor[0] = PCID_KERNEL;
17503+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17504+ }
17505+ } else {
17506+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17507+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17508+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17509+ else
17510+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17511+ }
17512+ } else
17513+#endif
17514+
17515+ load_cr3(get_cpu_pgd(cpu, kernel));
17516+#else
17517 load_cr3(next->pgd);
17518+#endif
17519
17520 /* Stop flush ipis for the previous mm */
17521 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17522@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17523 /* Load the LDT, if the LDT is different: */
17524 if (unlikely(prev->context.ldt != next->context.ldt))
17525 load_LDT_nolock(&next->context);
17526+
17527+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17528+ if (!(__supported_pte_mask & _PAGE_NX)) {
17529+ smp_mb__before_clear_bit();
17530+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17531+ smp_mb__after_clear_bit();
17532+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17533+ }
17534+#endif
17535+
17536+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17537+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17538+ prev->context.user_cs_limit != next->context.user_cs_limit))
17539+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17540+#ifdef CONFIG_SMP
17541+ else if (unlikely(tlbstate != TLBSTATE_OK))
17542+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17543+#endif
17544+#endif
17545+
17546 }
17547+ else {
17548+
17549+#ifdef CONFIG_PAX_PER_CPU_PGD
17550+ pax_open_kernel();
17551+
17552+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17553+ if (static_cpu_has(X86_FEATURE_PCID))
17554+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17555+ else
17556+#endif
17557+
17558+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17559+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17560+ pax_close_kernel();
17561+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17562+
17563+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17564+ if (static_cpu_has(X86_FEATURE_PCID)) {
17565+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17566+ u64 descriptor[2];
17567+ descriptor[0] = PCID_USER;
17568+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17569+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17570+ descriptor[0] = PCID_KERNEL;
17571+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17572+ }
17573+ } else {
17574+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17575+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17576+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17577+ else
17578+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17579+ }
17580+ } else
17581+#endif
17582+
17583+ load_cr3(get_cpu_pgd(cpu, kernel));
17584+#endif
17585+
17586 #ifdef CONFIG_SMP
17587- else {
17588 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17589 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17590
17591@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17592 * tlb flush IPI delivery. We must reload CR3
17593 * to make sure to use no freed page tables.
17594 */
17595+
17596+#ifndef CONFIG_PAX_PER_CPU_PGD
17597 load_cr3(next->pgd);
17598+#endif
17599+
17600 load_LDT_nolock(&next->context);
17601+
17602+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17603+ if (!(__supported_pte_mask & _PAGE_NX))
17604+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17605+#endif
17606+
17607+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17608+#ifdef CONFIG_PAX_PAGEEXEC
17609+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17610+#endif
17611+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17612+#endif
17613+
17614 }
17615+#endif
17616 }
17617-#endif
17618 }
17619
17620 #define activate_mm(prev, next) \
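
The switch_mm() additions implement PAX_PER_CPU_PGD with optional PCID use: kernel mappings live in a per-CPU pgd running under PCID_KERNEL (0), the shadowed user half under PCID_USER (1), and stale user translations are dropped either by a single-context INVPCID or by rewriting CR3, setting bit 63 (PCID_NOFLUSH) when the TLB contents may be kept. The CR3 encoding this relies on, as a sketch (x86-64 with CR4.PCIDE enabled):

	#define PCID_MASK	0xfffUL		/* CR3 bits 11:0 select the PCID */
	#define CR3_NOFLUSH	(1UL << 63)	/* set: keep TLB entries for this PCID */

	static inline unsigned long mk_cr3(unsigned long pgd_pa,
					   unsigned long pcid, int noflush)
	{
		return pgd_pa | (pcid & PCID_MASK) | (noflush ? CR3_NOFLUSH : 0UL);
	}
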
17621diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17622index e3b7819..b257c64 100644
17623--- a/arch/x86/include/asm/module.h
17624+++ b/arch/x86/include/asm/module.h
17625@@ -5,6 +5,7 @@
17626
17627 #ifdef CONFIG_X86_64
17628 /* X86_64 does not define MODULE_PROC_FAMILY */
17629+#define MODULE_PROC_FAMILY ""
17630 #elif defined CONFIG_M486
17631 #define MODULE_PROC_FAMILY "486 "
17632 #elif defined CONFIG_M586
17633@@ -57,8 +58,20 @@
17634 #error unknown processor family
17635 #endif
17636
17637-#ifdef CONFIG_X86_32
17638-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17639+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17640+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17641+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17642+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17643+#else
17644+#define MODULE_PAX_KERNEXEC ""
17645 #endif
17646
17647+#ifdef CONFIG_PAX_MEMORY_UDEREF
17648+#define MODULE_PAX_UDEREF "UDEREF "
17649+#else
17650+#define MODULE_PAX_UDEREF ""
17651+#endif
17652+
17653+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17654+
17655 #endif /* _ASM_X86_MODULE_H */
17656diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17657index 86f9301..b365cda 100644
17658--- a/arch/x86/include/asm/nmi.h
17659+++ b/arch/x86/include/asm/nmi.h
17660@@ -40,11 +40,11 @@ struct nmiaction {
17661 nmi_handler_t handler;
17662 unsigned long flags;
17663 const char *name;
17664-};
17665+} __do_const;
17666
17667 #define register_nmi_handler(t, fn, fg, n, init...) \
17668 ({ \
17669- static struct nmiaction init fn##_na = { \
17670+ static const struct nmiaction init fn##_na = { \
17671 .handler = (fn), \
17672 .name = (n), \
17673 .flags = (fg), \
17674@@ -52,7 +52,7 @@ struct nmiaction {
17675 __register_nmi_handler((t), &fn##_na); \
17676 })
17677
17678-int __register_nmi_handler(unsigned int, struct nmiaction *);
17679+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17680
17681 void unregister_nmi_handler(unsigned int, const char *);
17682
17683diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17684index 775873d..de5f0304 100644
17685--- a/arch/x86/include/asm/page.h
17686+++ b/arch/x86/include/asm/page.h
17687@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17688 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17689
17690 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17691+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17692
17693 #define __boot_va(x) __va(x)
17694 #define __boot_pa(x) __pa(x)
17695diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17696index 0f1ddee..e2fc3d1 100644
17697--- a/arch/x86/include/asm/page_64.h
17698+++ b/arch/x86/include/asm/page_64.h
17699@@ -7,9 +7,9 @@
17700
17701 /* duplicated to the one in bootmem.h */
17702 extern unsigned long max_pfn;
17703-extern unsigned long phys_base;
17704+extern const unsigned long phys_base;
17705
17706-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17707+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17708 {
17709 unsigned long y = x - __START_KERNEL_map;
17710
17711diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
17712index 8de6d9c..6782051 100644
17713--- a/arch/x86/include/asm/page_64_types.h
17714+++ b/arch/x86/include/asm/page_64_types.h
17715@@ -1,7 +1,7 @@
17716 #ifndef _ASM_X86_PAGE_64_DEFS_H
17717 #define _ASM_X86_PAGE_64_DEFS_H
17718
17719-#define THREAD_SIZE_ORDER 1
17720+#define THREAD_SIZE_ORDER 2
17721 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
17722 #define CURRENT_MASK (~(THREAD_SIZE - 1))
17723
17724diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17725index cd6e1610..70f4418 100644
17726--- a/arch/x86/include/asm/paravirt.h
17727+++ b/arch/x86/include/asm/paravirt.h
17728@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17729 return (pmd_t) { ret };
17730 }
17731
17732-static inline pmdval_t pmd_val(pmd_t pmd)
17733+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17734 {
17735 pmdval_t ret;
17736
17737@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17738 val);
17739 }
17740
17741+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17742+{
17743+ pgdval_t val = native_pgd_val(pgd);
17744+
17745+ if (sizeof(pgdval_t) > sizeof(long))
17746+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17747+ val, (u64)val >> 32);
17748+ else
17749+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17750+ val);
17751+}
17752+
17753 static inline void pgd_clear(pgd_t *pgdp)
17754 {
17755 set_pgd(pgdp, __pgd(0));
17756@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17757 pv_mmu_ops.set_fixmap(idx, phys, flags);
17758 }
17759
17760+#ifdef CONFIG_PAX_KERNEXEC
17761+static inline unsigned long pax_open_kernel(void)
17762+{
17763+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17764+}
17765+
17766+static inline unsigned long pax_close_kernel(void)
17767+{
17768+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17769+}
17770+#else
17771+static inline unsigned long pax_open_kernel(void) { return 0; }
17772+static inline unsigned long pax_close_kernel(void) { return 0; }
17773+#endif
17774+
17775 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17776
17777 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17778@@ -906,7 +933,7 @@ extern void default_banner(void);
17779
17780 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17781 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17782-#define PARA_INDIRECT(addr) *%cs:addr
17783+#define PARA_INDIRECT(addr) *%ss:addr
17784 #endif
17785
17786 #define INTERRUPT_RETURN \
17787@@ -981,6 +1008,21 @@ extern void default_banner(void);
17788 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17789 CLBR_NONE, \
17790 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17791+
17792+#define GET_CR0_INTO_RDI \
17793+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17794+ mov %rax,%rdi
17795+
17796+#define SET_RDI_INTO_CR0 \
17797+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17798+
17799+#define GET_CR3_INTO_RDI \
17800+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17801+ mov %rax,%rdi
17802+
17803+#define SET_RDI_INTO_CR3 \
17804+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17805+
17806 #endif /* CONFIG_X86_32 */
17807
17808 #endif /* __ASSEMBLY__ */
17809diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17810index 7549b8b..f0edfda 100644
17811--- a/arch/x86/include/asm/paravirt_types.h
17812+++ b/arch/x86/include/asm/paravirt_types.h
17813@@ -84,7 +84,7 @@ struct pv_init_ops {
17814 */
17815 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17816 unsigned long addr, unsigned len);
17817-};
17818+} __no_const __no_randomize_layout;
17819
17820
17821 struct pv_lazy_ops {
17822@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17823 void (*enter)(void);
17824 void (*leave)(void);
17825 void (*flush)(void);
17826-};
17827+} __no_randomize_layout;
17828
17829 struct pv_time_ops {
17830 unsigned long long (*sched_clock)(void);
17831 unsigned long long (*steal_clock)(int cpu);
17832 unsigned long (*get_tsc_khz)(void);
17833-};
17834+} __no_const __no_randomize_layout;
17835
17836 struct pv_cpu_ops {
17837 /* hooks for various privileged instructions */
17838@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17839
17840 void (*start_context_switch)(struct task_struct *prev);
17841 void (*end_context_switch)(struct task_struct *next);
17842-};
17843+} __no_const __no_randomize_layout;
17844
17845 struct pv_irq_ops {
17846 /*
17847@@ -215,7 +215,7 @@ struct pv_irq_ops {
17848 #ifdef CONFIG_X86_64
17849 void (*adjust_exception_frame)(void);
17850 #endif
17851-};
17852+} __no_randomize_layout;
17853
17854 struct pv_apic_ops {
17855 #ifdef CONFIG_X86_LOCAL_APIC
17856@@ -223,7 +223,7 @@ struct pv_apic_ops {
17857 unsigned long start_eip,
17858 unsigned long start_esp);
17859 #endif
17860-};
17861+} __no_const __no_randomize_layout;
17862
17863 struct pv_mmu_ops {
17864 unsigned long (*read_cr2)(void);
17865@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17866 struct paravirt_callee_save make_pud;
17867
17868 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17869+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17870 #endif /* PAGETABLE_LEVELS == 4 */
17871 #endif /* PAGETABLE_LEVELS >= 3 */
17872
17873@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17874 an mfn. We can tell which is which from the index. */
17875 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17876 phys_addr_t phys, pgprot_t flags);
17877-};
17878+
17879+#ifdef CONFIG_PAX_KERNEXEC
17880+ unsigned long (*pax_open_kernel)(void);
17881+ unsigned long (*pax_close_kernel)(void);
17882+#endif
17883+
17884+} __no_randomize_layout;
17885
17886 struct arch_spinlock;
17887 #ifdef CONFIG_SMP
17888@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17889 struct pv_lock_ops {
17890 struct paravirt_callee_save lock_spinning;
17891 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17892-};
17893+} __no_randomize_layout;
17894
17895 /* This contains all the paravirt structures: we get a convenient
17896 * number for each function using the offset which we use to indicate
17897- * what to patch. */
17898+ * what to patch.
17899+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17900+ */
17901+
17902 struct paravirt_patch_template {
17903 struct pv_init_ops pv_init_ops;
17904 struct pv_time_ops pv_time_ops;
17905@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17906 struct pv_apic_ops pv_apic_ops;
17907 struct pv_mmu_ops pv_mmu_ops;
17908 struct pv_lock_ops pv_lock_ops;
17909-};
17910+} __no_randomize_layout;
17911
17912 extern struct pv_info pv_info;
17913 extern struct pv_init_ops pv_init_ops;
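
The __no_randomize_layout sprinkled over the pv_*_ops containers is not hardening turned off for convenience: paravirt patch sites identify an operation by its byte offset into paravirt_patch_template (the "NEAT TRICK" the added comment points at), so the layout must stay exactly as declared. Upstream computes those offsets along these lines:

	/* from paravirt_types.h: an op's patch number is its slot index */
	#define PARAVIRT_PATCH(x) \
		(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
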
17914diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17915index c4412e9..90e88c5 100644
17916--- a/arch/x86/include/asm/pgalloc.h
17917+++ b/arch/x86/include/asm/pgalloc.h
17918@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17919 pmd_t *pmd, pte_t *pte)
17920 {
17921 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17922+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17923+}
17924+
17925+static inline void pmd_populate_user(struct mm_struct *mm,
17926+ pmd_t *pmd, pte_t *pte)
17927+{
17928+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17929 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17930 }
17931
17932@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17933
17934 #ifdef CONFIG_X86_PAE
17935 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17936+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17937+{
17938+ pud_populate(mm, pudp, pmd);
17939+}
17940 #else /* !CONFIG_X86_PAE */
17941 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17942 {
17943 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17944 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17945 }
17946+
17947+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17948+{
17949+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17950+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17951+}
17952 #endif /* CONFIG_X86_PAE */
17953
17954 #if PAGETABLE_LEVELS > 3
17955@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17956 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17957 }
17958
17959+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17960+{
17961+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17962+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17963+}
17964+
17965 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17966 {
17967 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17968diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17969index 0d193e2..bf59aeb 100644
17970--- a/arch/x86/include/asm/pgtable-2level.h
17971+++ b/arch/x86/include/asm/pgtable-2level.h
17972@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17973
17974 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17975 {
17976+ pax_open_kernel();
17977 *pmdp = pmd;
17978+ pax_close_kernel();
17979 }
17980
17981 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17982diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17983index 81bb91b..9392125 100644
17984--- a/arch/x86/include/asm/pgtable-3level.h
17985+++ b/arch/x86/include/asm/pgtable-3level.h
17986@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17987
17988 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17989 {
17990+ pax_open_kernel();
17991 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17992+ pax_close_kernel();
17993 }
17994
17995 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17996 {
17997+ pax_open_kernel();
17998 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17999+ pax_close_kernel();
18000 }
18001
18002 /*
18003diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18004index bbc8b12..f228861 100644
18005--- a/arch/x86/include/asm/pgtable.h
18006+++ b/arch/x86/include/asm/pgtable.h
18007@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18008
18009 #ifndef __PAGETABLE_PUD_FOLDED
18010 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18011+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18012 #define pgd_clear(pgd) native_pgd_clear(pgd)
18013 #endif
18014
18015@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18016
18017 #define arch_end_context_switch(prev) do {} while(0)
18018
18019+#define pax_open_kernel() native_pax_open_kernel()
18020+#define pax_close_kernel() native_pax_close_kernel()
18021 #endif /* CONFIG_PARAVIRT */
18022
18023+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18024+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18025+
18026+#ifdef CONFIG_PAX_KERNEXEC
18027+static inline unsigned long native_pax_open_kernel(void)
18028+{
18029+ unsigned long cr0;
18030+
18031+ preempt_disable();
18032+ barrier();
18033+ cr0 = read_cr0() ^ X86_CR0_WP;
18034+ BUG_ON(cr0 & X86_CR0_WP);
18035+ write_cr0(cr0);
18036+ return cr0 ^ X86_CR0_WP;
18037+}
18038+
18039+static inline unsigned long native_pax_close_kernel(void)
18040+{
18041+ unsigned long cr0;
18042+
18043+ cr0 = read_cr0() ^ X86_CR0_WP;
18044+ BUG_ON(!(cr0 & X86_CR0_WP));
18045+ write_cr0(cr0);
18046+ barrier();
18047+ preempt_enable_no_resched();
18048+ return cr0 ^ X86_CR0_WP;
18049+}
18050+#else
18051+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18052+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18053+#endif
18054+
18055 /*
18056 * The following only work if pte_present() is true.
18057 * Undefined behaviour if not..
18058 */
18059+static inline int pte_user(pte_t pte)
18060+{
18061+ return pte_val(pte) & _PAGE_USER;
18062+}
18063+
18064 static inline int pte_dirty(pte_t pte)
18065 {
18066 return pte_flags(pte) & _PAGE_DIRTY;
18067@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18068 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18069 }
18070
18071+static inline unsigned long pgd_pfn(pgd_t pgd)
18072+{
18073+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18074+}
18075+
18076 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18077
18078 static inline int pmd_large(pmd_t pte)
18079@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18080 return pte_clear_flags(pte, _PAGE_RW);
18081 }
18082
18083+static inline pte_t pte_mkread(pte_t pte)
18084+{
18085+ return __pte(pte_val(pte) | _PAGE_USER);
18086+}
18087+
18088 static inline pte_t pte_mkexec(pte_t pte)
18089 {
18090- return pte_clear_flags(pte, _PAGE_NX);
18091+#ifdef CONFIG_X86_PAE
18092+ if (__supported_pte_mask & _PAGE_NX)
18093+ return pte_clear_flags(pte, _PAGE_NX);
18094+ else
18095+#endif
18096+ return pte_set_flags(pte, _PAGE_USER);
18097+}
18098+
18099+static inline pte_t pte_exprotect(pte_t pte)
18100+{
18101+#ifdef CONFIG_X86_PAE
18102+ if (__supported_pte_mask & _PAGE_NX)
18103+ return pte_set_flags(pte, _PAGE_NX);
18104+ else
18105+#endif
18106+ return pte_clear_flags(pte, _PAGE_USER);
18107 }
18108
18109 static inline pte_t pte_mkdirty(pte_t pte)
18110@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18111 #endif
18112
18113 #ifndef __ASSEMBLY__
18114+
18115+#ifdef CONFIG_PAX_PER_CPU_PGD
18116+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18117+enum cpu_pgd_type {kernel = 0, user = 1};
18118+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18119+{
18120+ return cpu_pgd[cpu][type];
18121+}
18122+#endif
18123+
18124 #include <linux/mm_types.h>
18125 #include <linux/mmdebug.h>
18126 #include <linux/log2.h>
18127@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18128 * Currently stuck as a macro due to indirect forward reference to
18129 * linux/mmzone.h's __section_mem_map_addr() definition:
18130 */
18131-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18132+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18133
18134 /* Find an entry in the second-level page table.. */
18135 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18136@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18137 * Currently stuck as a macro due to indirect forward reference to
18138 * linux/mmzone.h's __section_mem_map_addr() definition:
18139 */
18140-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18141+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18142
18143 /* to find an entry in a page-table-directory. */
18144 static inline unsigned long pud_index(unsigned long address)
18145@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18146
18147 static inline int pgd_bad(pgd_t pgd)
18148 {
18149- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18150+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18151 }
18152
18153 static inline int pgd_none(pgd_t pgd)
18154@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
18155 * pgd_offset() returns a (pgd_t *)
18156 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18157 */
18158-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18159+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18160+
18161+#ifdef CONFIG_PAX_PER_CPU_PGD
18162+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18163+#endif
18164+
18165 /*
18166 * a shortcut which implies the use of the kernel's pgd, instead
18167 * of a process's
18168@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
18169 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18170 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18171
18172+#ifdef CONFIG_X86_32
18173+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18174+#else
18175+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18176+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18177+
18178+#ifdef CONFIG_PAX_MEMORY_UDEREF
18179+#ifdef __ASSEMBLY__
18180+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18181+#else
18182+extern unsigned long pax_user_shadow_base;
18183+extern pgdval_t clone_pgd_mask;
18184+#endif
18185+#endif
18186+
18187+#endif
18188+
18189 #ifndef __ASSEMBLY__
18190
18191 extern int direct_gbpages;
18192@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18193 * dst and src can be on the same page, but the range must not overlap,
18194 * and must not cross a page boundary.
18195 */
18196-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18197+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18198 {
18199- memcpy(dst, src, count * sizeof(pgd_t));
18200+ pax_open_kernel();
18201+ while (count--)
18202+ *dst++ = *src++;
18203+ pax_close_kernel();
18204 }
18205
18206+#ifdef CONFIG_PAX_PER_CPU_PGD
18207+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18208+#endif
18209+
18210+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18211+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18212+#else
18213+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18214+#endif
18215+
18216 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18217 static inline int page_level_shift(enum pg_level level)
18218 {
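
native_pax_open_kernel()/native_pax_close_kernel() defined in this hunk are the KERNEXEC write window: clear CR0.WP with preemption disabled, let the caller store into otherwise read-only kernel data (page tables, text), then restore WP. Typical use, built only from the functions the hunk itself defines:

	static inline void set_ro_word(unsigned long *p, unsigned long v)
	{
		pax_open_kernel();	/* CR0.WP cleared, preemption off */
		*p = v;			/* target may live in a read-only mapping */
		pax_close_kernel();	/* CR0.WP restored, preemption back on */
	}
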
18219diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18220index 9ee3221..b979c6b 100644
18221--- a/arch/x86/include/asm/pgtable_32.h
18222+++ b/arch/x86/include/asm/pgtable_32.h
18223@@ -25,9 +25,6 @@
18224 struct mm_struct;
18225 struct vm_area_struct;
18226
18227-extern pgd_t swapper_pg_dir[1024];
18228-extern pgd_t initial_page_table[1024];
18229-
18230 static inline void pgtable_cache_init(void) { }
18231 static inline void check_pgt_cache(void) { }
18232 void paging_init(void);
18233@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18234 # include <asm/pgtable-2level.h>
18235 #endif
18236
18237+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18238+extern pgd_t initial_page_table[PTRS_PER_PGD];
18239+#ifdef CONFIG_X86_PAE
18240+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18241+#endif
18242+
18243 #if defined(CONFIG_HIGHPTE)
18244 #define pte_offset_map(dir, address) \
18245 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18246@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18247 /* Clear a kernel PTE and flush it from the TLB */
18248 #define kpte_clear_flush(ptep, vaddr) \
18249 do { \
18250+ pax_open_kernel(); \
18251 pte_clear(&init_mm, (vaddr), (ptep)); \
18252+ pax_close_kernel(); \
18253 __flush_tlb_one((vaddr)); \
18254 } while (0)
18255
18256 #endif /* !__ASSEMBLY__ */
18257
18258+#define HAVE_ARCH_UNMAPPED_AREA
18259+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18260+
18261 /*
18262 * kern_addr_valid() is (1) for FLATMEM and (0) for
18263 * SPARSEMEM and DISCONTIGMEM
18264diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18265index ed5903b..c7fe163 100644
18266--- a/arch/x86/include/asm/pgtable_32_types.h
18267+++ b/arch/x86/include/asm/pgtable_32_types.h
18268@@ -8,7 +8,7 @@
18269 */
18270 #ifdef CONFIG_X86_PAE
18271 # include <asm/pgtable-3level_types.h>
18272-# define PMD_SIZE (1UL << PMD_SHIFT)
18273+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18274 # define PMD_MASK (~(PMD_SIZE - 1))
18275 #else
18276 # include <asm/pgtable-2level_types.h>
18277@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18278 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18279 #endif
18280
18281+#ifdef CONFIG_PAX_KERNEXEC
18282+#ifndef __ASSEMBLY__
18283+extern unsigned char MODULES_EXEC_VADDR[];
18284+extern unsigned char MODULES_EXEC_END[];
18285+#endif
18286+#include <asm/boot.h>
18287+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18288+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18289+#else
18290+#define ktla_ktva(addr) (addr)
18291+#define ktva_ktla(addr) (addr)
18292+#endif
18293+
18294 #define MODULES_VADDR VMALLOC_START
18295 #define MODULES_END VMALLOC_END
18296 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18297diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18298index e22c1db..23a625a 100644
18299--- a/arch/x86/include/asm/pgtable_64.h
18300+++ b/arch/x86/include/asm/pgtable_64.h
18301@@ -16,10 +16,14 @@
18302
18303 extern pud_t level3_kernel_pgt[512];
18304 extern pud_t level3_ident_pgt[512];
18305+extern pud_t level3_vmalloc_start_pgt[512];
18306+extern pud_t level3_vmalloc_end_pgt[512];
18307+extern pud_t level3_vmemmap_pgt[512];
18308+extern pud_t level2_vmemmap_pgt[512];
18309 extern pmd_t level2_kernel_pgt[512];
18310 extern pmd_t level2_fixmap_pgt[512];
18311-extern pmd_t level2_ident_pgt[512];
18312-extern pgd_t init_level4_pgt[];
18313+extern pmd_t level2_ident_pgt[512*2];
18314+extern pgd_t init_level4_pgt[512];
18315
18316 #define swapper_pg_dir init_level4_pgt
18317
18318@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18319
18320 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18321 {
18322+ pax_open_kernel();
18323 *pmdp = pmd;
18324+ pax_close_kernel();
18325 }
18326
18327 static inline void native_pmd_clear(pmd_t *pmd)
18328@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18329
18330 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18331 {
18332+ pax_open_kernel();
18333 *pudp = pud;
18334+ pax_close_kernel();
18335 }
18336
18337 static inline void native_pud_clear(pud_t *pud)
18338@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
18339
18340 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18341 {
18342+ pax_open_kernel();
18343+ *pgdp = pgd;
18344+ pax_close_kernel();
18345+}
18346+
18347+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18348+{
18349 *pgdp = pgd;
18350 }
18351
18352diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18353index c883bf7..19970b3 100644
18354--- a/arch/x86/include/asm/pgtable_64_types.h
18355+++ b/arch/x86/include/asm/pgtable_64_types.h
18356@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
18357 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18358 #define MODULES_END _AC(0xffffffffff000000, UL)
18359 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18360+#define MODULES_EXEC_VADDR MODULES_VADDR
18361+#define MODULES_EXEC_END MODULES_END
18362+
18363+#define ktla_ktva(addr) (addr)
18364+#define ktva_ktla(addr) (addr)
18365
18366 #define EARLY_DYNAMIC_PAGE_TABLES 64
18367
18368diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18369index 94e40f1..ebd03e4 100644
18370--- a/arch/x86/include/asm/pgtable_types.h
18371+++ b/arch/x86/include/asm/pgtable_types.h
18372@@ -16,13 +16,12 @@
18373 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
18374 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
18375 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
18376-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
18377+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
18378 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
18379 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
18380 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
18381-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
18382-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
18383-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
18384+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
18385+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
18386 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
18387
18388 /* If _PAGE_BIT_PRESENT is clear, we use these: */
18389@@ -40,7 +39,6 @@
18390 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
18391 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
18392 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
18393-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
18394 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
18395 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
18396 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
18397@@ -87,8 +85,10 @@
18398
18399 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18400 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18401-#else
18402+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18403 #define _PAGE_NX (_AT(pteval_t, 0))
18404+#else
18405+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18406 #endif
18407
18408 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18409@@ -147,6 +147,9 @@
18410 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18411 _PAGE_ACCESSED)
18412
18413+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18414+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18415+
18416 #define __PAGE_KERNEL_EXEC \
18417 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18418 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18419@@ -157,7 +160,7 @@
18420 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
18421 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
18422 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
18423-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18424+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18425 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18426 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
18427 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18428@@ -219,8 +222,8 @@
18429 * bits are combined, this will allow user to access the high address mapped
18430 * VDSO in the presence of CONFIG_COMPAT_VDSO
18431 */
18432-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18433-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
18434+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18435+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18436 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18437 #endif
18438
18439@@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18440 {
18441 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18442 }
18443+#endif
18444
18445+#if PAGETABLE_LEVELS == 3
18446+#include <asm-generic/pgtable-nopud.h>
18447+#endif
18448+
18449+#if PAGETABLE_LEVELS == 2
18450+#include <asm-generic/pgtable-nopmd.h>
18451+#endif
18452+
18453+#ifndef __ASSEMBLY__
18454 #if PAGETABLE_LEVELS > 3
18455 typedef struct { pudval_t pud; } pud_t;
18456
18457@@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18458 return pud.pud;
18459 }
18460 #else
18461-#include <asm-generic/pgtable-nopud.h>
18462-
18463 static inline pudval_t native_pud_val(pud_t pud)
18464 {
18465 return native_pgd_val(pud.pgd);
18466@@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18467 return pmd.pmd;
18468 }
18469 #else
18470-#include <asm-generic/pgtable-nopmd.h>
18471-
18472 static inline pmdval_t native_pmd_val(pmd_t pmd)
18473 {
18474 return native_pgd_val(pmd.pud.pgd);
18475@@ -334,7 +343,6 @@ typedef struct page *pgtable_t;
18476
18477 extern pteval_t __supported_pte_mask;
18478 extern void set_nx(void);
18479-extern int nx_enabled;
18480
18481 #define pgprot_writecombine pgprot_writecombine
18482 extern pgprot_t pgprot_writecombine(pgprot_t prot);
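A note on the _PAGE_NX hunk above: on 32-bit non-PAE hardware there is no NX page-table bit, so vanilla kernels define _PAGE_NX as 0 and the no-execute intent is silently dropped. The patch instead reuses the software-only bit 11 (ordinarily kmemcheck's hidden bit) as an emulated NX flag whenever kmemcheck or soft-dirty has not already claimed it, presumably so PaX's software PAGEEXEC can still track executability per PTE. A minimal self-contained sketch of the selection logic; the pteval_t width and the printout are illustrative only:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;		/* illustrative width only */

#define _PAGE_BIT_HIDDEN 11		/* software bit, ignored by the MMU */
#define _PAGE_BIT_NX     63		/* hardware NX, PAE/64-bit only */

/* None of CONFIG_X86_64/X86_PAE/KMEMCHECK/MEM_SOFT_DIRTY is defined in
 * this sketch, so the emulated-NX branch is selected, as on a 32-bit
 * non-PAE kernel built with the patch: */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_NX)
#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
#define _PAGE_NX ((pteval_t)0)		/* bit 11 already taken: NX is lost */
#else
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)	/* software NX */
#endif

int main(void)
{
	pteval_t pte = 0x063 | _PAGE_NX;	/* PRESENT+RW+DIRTY+ACCESSED, no-exec */

	/* Unlike the vanilla _PAGE_NX == 0 fallback, the flag survives
	 * and can be tested even though the hardware never sees it: */
	printf("pte records NX: %d\n", (pte & _PAGE_NX) != 0);
	return 0;
}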
18483diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18484index b39e194..9d44fd1 100644
18485--- a/arch/x86/include/asm/preempt.h
18486+++ b/arch/x86/include/asm/preempt.h
18487@@ -99,7 +99,7 @@ static __always_inline void __preempt_count_sub(int val)
18488 */
18489 static __always_inline bool __preempt_count_dec_and_test(void)
18490 {
18491- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18492+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18493 }
18494
18495 /*
18496diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18497index fdedd38..95c02c2 100644
18498--- a/arch/x86/include/asm/processor.h
18499+++ b/arch/x86/include/asm/processor.h
18500@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18501 /* Index into per_cpu list: */
18502 u16 cpu_index;
18503 u32 microcode;
18504-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18505+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18506
18507 #define X86_VENDOR_INTEL 0
18508 #define X86_VENDOR_CYRIX 1
18509@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18510 : "memory");
18511 }
18512
18513+/* invpcid (%rdx),%rax */
18514+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18515+
18516+#define INVPCID_SINGLE_ADDRESS 0UL
18517+#define INVPCID_SINGLE_CONTEXT 1UL
18518+#define INVPCID_ALL_GLOBAL 2UL
18519+#define INVPCID_ALL_MONGLOBAL 3UL
18520+
18521+#define PCID_KERNEL 0UL
18522+#define PCID_USER 1UL
18523+#define PCID_NOFLUSH (1UL << 63)
18524+
18525 static inline void load_cr3(pgd_t *pgdir)
18526 {
18527- write_cr3(__pa(pgdir));
18528+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18529 }
18530
18531 #ifdef CONFIG_X86_32
18532@@ -283,7 +295,7 @@ struct tss_struct {
18533
18534 } ____cacheline_aligned;
18535
18536-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18537+extern struct tss_struct init_tss[NR_CPUS];
18538
18539 /*
18540 * Save the original ist values for checking stack pointers during debugging
18541@@ -470,6 +482,7 @@ struct thread_struct {
18542 unsigned short ds;
18543 unsigned short fsindex;
18544 unsigned short gsindex;
18545+ unsigned short ss;
18546 #endif
18547 #ifdef CONFIG_X86_32
18548 unsigned long ip;
18549@@ -579,29 +592,8 @@ static inline void load_sp0(struct tss_struct *tss,
18550 extern unsigned long mmu_cr4_features;
18551 extern u32 *trampoline_cr4_features;
18552
18553-static inline void set_in_cr4(unsigned long mask)
18554-{
18555- unsigned long cr4;
18556-
18557- mmu_cr4_features |= mask;
18558- if (trampoline_cr4_features)
18559- *trampoline_cr4_features = mmu_cr4_features;
18560- cr4 = read_cr4();
18561- cr4 |= mask;
18562- write_cr4(cr4);
18563-}
18564-
18565-static inline void clear_in_cr4(unsigned long mask)
18566-{
18567- unsigned long cr4;
18568-
18569- mmu_cr4_features &= ~mask;
18570- if (trampoline_cr4_features)
18571- *trampoline_cr4_features = mmu_cr4_features;
18572- cr4 = read_cr4();
18573- cr4 &= ~mask;
18574- write_cr4(cr4);
18575-}
18576+extern void set_in_cr4(unsigned long mask);
18577+extern void clear_in_cr4(unsigned long mask);
18578
18579 typedef struct {
18580 unsigned long seg;
18581@@ -827,11 +819,18 @@ static inline void spin_lock_prefetch(const void *x)
18582 */
18583 #define TASK_SIZE PAGE_OFFSET
18584 #define TASK_SIZE_MAX TASK_SIZE
18585+
18586+#ifdef CONFIG_PAX_SEGMEXEC
18587+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18588+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18589+#else
18590 #define STACK_TOP TASK_SIZE
18591-#define STACK_TOP_MAX STACK_TOP
18592+#endif
18593+
18594+#define STACK_TOP_MAX TASK_SIZE
18595
18596 #define INIT_THREAD { \
18597- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18598+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18599 .vm86_info = NULL, \
18600 .sysenter_cs = __KERNEL_CS, \
18601 .io_bitmap_ptr = NULL, \
18602@@ -845,7 +844,7 @@ static inline void spin_lock_prefetch(const void *x)
18603 */
18604 #define INIT_TSS { \
18605 .x86_tss = { \
18606- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18607+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18608 .ss0 = __KERNEL_DS, \
18609 .ss1 = __KERNEL_CS, \
18610 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18611@@ -856,11 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18612 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18613
18614 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18615-#define KSTK_TOP(info) \
18616-({ \
18617- unsigned long *__ptr = (unsigned long *)(info); \
18618- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18619-})
18620+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18621
18622 /*
18623 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18624@@ -875,7 +870,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18625 #define task_pt_regs(task) \
18626 ({ \
18627 struct pt_regs *__regs__; \
18628- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18629+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18630 __regs__ - 1; \
18631 })
18632
18633@@ -885,13 +880,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18634 /*
18635 * User space process size. 47 bits minus one guard page.
18636 */
18637-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18638+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18639
18640 /* This decides where the kernel will search for a free chunk of vm
18641 * space during mmap's.
18642 */
18643 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18644- 0xc0000000 : 0xFFFFe000)
18645+ 0xc0000000 : 0xFFFFf000)
18646
18647 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18648 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18649@@ -902,11 +897,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18650 #define STACK_TOP_MAX TASK_SIZE_MAX
18651
18652 #define INIT_THREAD { \
18653- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18654+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18655 }
18656
18657 #define INIT_TSS { \
18658- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18659+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18660 }
18661
18662 /*
18663@@ -934,6 +929,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18664 */
18665 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18666
18667+#ifdef CONFIG_PAX_SEGMEXEC
18668+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18669+#endif
18670+
18671 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18672
18673 /* Get/set a process' ability to use the timestamp counter instruction */
18674@@ -960,7 +959,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18675 return 0;
18676 }
18677
18678-extern unsigned long arch_align_stack(unsigned long sp);
18679+#define arch_align_stack(x) ((x) & ~0xfUL)
18680 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18681
18682 void default_idle(void);
18683@@ -970,6 +969,6 @@ bool xen_set_default_idle(void);
18684 #define xen_set_default_idle 0
18685 #endif
18686
18687-void stop_this_cpu(void *dummy);
18688+void stop_this_cpu(void *dummy) __noreturn;
18689 void df_debug(struct pt_regs *regs, long error_code);
18690 #endif /* _ASM_X86_PROCESSOR_H */
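set_in_cr4()/clear_in_cr4() are demoted from header inlines to out-of-line functions, presumably so their bodies can grow PaX-specific handling without dragging extra headers into processor.h. A self-contained sketch that simply mirrors the deleted inline bodies, with stub globals standing in for the CR4 state so it compiles on its own; this is not the patch's actual definition:

#include <stdio.h>

static unsigned long mmu_cr4_features;
static unsigned long trampoline_cr4_shadow;
static unsigned long *trampoline_cr4_features = &trampoline_cr4_shadow;
static unsigned long cr4;			/* models the CR4 register */

static unsigned long read_cr4(void) { return cr4; }
static void write_cr4(unsigned long v) { cr4 = v; }

void set_in_cr4(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;	/* keep the realmode trampoline in sync */
	write_cr4(read_cr4() | mask);
}

void clear_in_cr4(unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	write_cr4(read_cr4() & ~mask);
}

int main(void)
{
	set_in_cr4(1UL << 7);			/* e.g. X86_CR4_PGE */
	printf("cr4=%#lx\n", cr4);		/* 0x80 */
	clear_in_cr4(1UL << 7);
	printf("cr4=%#lx\n", cr4);		/* 0 */
	return 0;
}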
18691diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18692index 14fd6fd..6740420 100644
18693--- a/arch/x86/include/asm/ptrace.h
18694+++ b/arch/x86/include/asm/ptrace.h
18695@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18696 }
18697
18698 /*
18699- * user_mode_vm(regs) determines whether a register set came from user mode.
18700+ * user_mode(regs) determines whether a register set came from user mode.
18701 * This is true if V8086 mode was enabled OR if the register set was from
18702 * protected mode with RPL-3 CS value. This tricky test checks that with
18703 * one comparison. Many places in the kernel can bypass this full check
18704- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18705+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18706+ * be used.
18707 */
18708-static inline int user_mode(struct pt_regs *regs)
18709+static inline int user_mode_novm(struct pt_regs *regs)
18710 {
18711 #ifdef CONFIG_X86_32
18712 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18713 #else
18714- return !!(regs->cs & 3);
18715+ return !!(regs->cs & SEGMENT_RPL_MASK);
18716 #endif
18717 }
18718
18719-static inline int user_mode_vm(struct pt_regs *regs)
18720+static inline int user_mode(struct pt_regs *regs)
18721 {
18722 #ifdef CONFIG_X86_32
18723 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18724 USER_RPL;
18725 #else
18726- return user_mode(regs);
18727+ return user_mode_novm(regs);
18728 #endif
18729 }
18730
18731@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18732 #ifdef CONFIG_X86_64
18733 static inline bool user_64bit_mode(struct pt_regs *regs)
18734 {
18735+ unsigned long cs = regs->cs & 0xffff;
18736 #ifndef CONFIG_PARAVIRT
18737 /*
18738 * On non-paravirt systems, this is the only long mode CPL 3
18739 * selector. We do not allow long mode selectors in the LDT.
18740 */
18741- return regs->cs == __USER_CS;
18742+ return cs == __USER_CS;
18743 #else
18744 /* Headers are too twisted for this to go in paravirt.h. */
18745- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18746+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18747 #endif
18748 }
18749
18750@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18751 * Traps from the kernel do not save sp and ss.
18752 * Use the helper function to retrieve sp.
18753 */
18754- if (offset == offsetof(struct pt_regs, sp) &&
18755- regs->cs == __KERNEL_CS)
18756- return kernel_stack_pointer(regs);
18757+ if (offset == offsetof(struct pt_regs, sp)) {
18758+ unsigned long cs = regs->cs & 0xffff;
18759+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18760+ return kernel_stack_pointer(regs);
18761+ }
18762 #endif
18763 return *(unsigned long *)((unsigned long)regs + offset);
18764 }
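The ptrace.h hunk swaps the names so that the full, safe check becomes the default user_mode() and the faster RPL-only variant must now be requested explicitly as user_mode_novm(); it also masks cs to 16 bits before comparing, since the upper bits of the saved cs slot are not guaranteed to be zero. A user-space sketch of the combined one-comparison RPL/VM86 test, with the x86 constants spelled out:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL	/* EFLAGS.VM */

static int user_mode(unsigned long cs, unsigned long flags)
{
	/* CPL 3 makes the low two bits equal 3; V8086 makes the OR huge.
	 * Either way one comparison against USER_RPL catches it. */
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("%d\n", user_mode(0x73, 0));		/* CPL-3 selector: 1 */
	printf("%d\n", user_mode(0x10, 0x20000));	/* V8086 mode: 1 */
	printf("%d\n", user_mode(0x10, 0));		/* kernel CS: 0 */
	return 0;
}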
18765diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18766index 9c6b890..5305f53 100644
18767--- a/arch/x86/include/asm/realmode.h
18768+++ b/arch/x86/include/asm/realmode.h
18769@@ -22,16 +22,14 @@ struct real_mode_header {
18770 #endif
18771 /* APM/BIOS reboot */
18772 u32 machine_real_restart_asm;
18773-#ifdef CONFIG_X86_64
18774 u32 machine_real_restart_seg;
18775-#endif
18776 };
18777
18778 /* This must match data at trampoline_32/64.S */
18779 struct trampoline_header {
18780 #ifdef CONFIG_X86_32
18781 u32 start;
18782- u16 gdt_pad;
18783+ u16 boot_cs;
18784 u16 gdt_limit;
18785 u32 gdt_base;
18786 #else
18787diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18788index a82c4f1..ac45053 100644
18789--- a/arch/x86/include/asm/reboot.h
18790+++ b/arch/x86/include/asm/reboot.h
18791@@ -6,13 +6,13 @@
18792 struct pt_regs;
18793
18794 struct machine_ops {
18795- void (*restart)(char *cmd);
18796- void (*halt)(void);
18797- void (*power_off)(void);
18798+ void (* __noreturn restart)(char *cmd);
18799+ void (* __noreturn halt)(void);
18800+ void (* __noreturn power_off)(void);
18801 void (*shutdown)(void);
18802 void (*crash_shutdown)(struct pt_regs *);
18803- void (*emergency_restart)(void);
18804-};
18805+ void (* __noreturn emergency_restart)(void);
18806+} __no_const;
18807
18808 extern struct machine_ops machine_ops;
18809
18810diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18811index 8f7866a..e442f20 100644
18812--- a/arch/x86/include/asm/rmwcc.h
18813+++ b/arch/x86/include/asm/rmwcc.h
18814@@ -3,7 +3,34 @@
18815
18816 #ifdef CC_HAVE_ASM_GOTO
18817
18818-#define __GEN_RMWcc(fullop, var, cc, ...) \
18819+#ifdef CONFIG_PAX_REFCOUNT
18820+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18821+do { \
18822+ asm_volatile_goto (fullop \
18823+ ";jno 0f\n" \
18824+ fullantiop \
18825+ ";int $4\n0:\n" \
18826+ _ASM_EXTABLE(0b, 0b) \
18827+ ";j" cc " %l[cc_label]" \
18828+ : : "m" (var), ## __VA_ARGS__ \
18829+ : "memory" : cc_label); \
18830+ return 0; \
18831+cc_label: \
18832+ return 1; \
18833+} while (0)
18834+#else
18835+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18836+do { \
18837+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18838+ : : "m" (var), ## __VA_ARGS__ \
18839+ : "memory" : cc_label); \
18840+ return 0; \
18841+cc_label: \
18842+ return 1; \
18843+} while (0)
18844+#endif
18845+
18846+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18847 do { \
18848 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18849 : : "m" (var), ## __VA_ARGS__ \
18850@@ -13,15 +40,46 @@ cc_label: \
18851 return 1; \
18852 } while (0)
18853
18854-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18855- __GEN_RMWcc(op " " arg0, var, cc)
18856+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18857+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18858
18859-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18860- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18861+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18862+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18863+
18864+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18865+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18866+
18867+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18868+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18869
18870 #else /* !CC_HAVE_ASM_GOTO */
18871
18872-#define __GEN_RMWcc(fullop, var, cc, ...) \
18873+#ifdef CONFIG_PAX_REFCOUNT
18874+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18875+do { \
18876+ char c; \
18877+ asm volatile (fullop \
18878+ ";jno 0f\n" \
18879+ fullantiop \
18880+ ";int $4\n0:\n" \
18881+ _ASM_EXTABLE(0b, 0b) \
18882+ "; set" cc " %1" \
18883+ : "+m" (var), "=qm" (c) \
18884+ : __VA_ARGS__ : "memory"); \
18885+ return c != 0; \
18886+} while (0)
18887+#else
18888+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18889+do { \
18890+ char c; \
18891+ asm volatile (fullop "; set" cc " %1" \
18892+ : "+m" (var), "=qm" (c) \
18893+ : __VA_ARGS__ : "memory"); \
18894+ return c != 0; \
18895+} while (0)
18896+#endif
18897+
18898+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18899 do { \
18900 char c; \
18901 asm volatile (fullop "; set" cc " %1" \
18902@@ -30,11 +88,17 @@ do { \
18903 return c != 0; \
18904 } while (0)
18905
18906-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18907- __GEN_RMWcc(op " " arg0, var, cc)
18908+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18909+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18910+
18911+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18912+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18913+
18914+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18915+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18916
18917-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18918- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18919+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18920+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18921
18922 #endif /* CC_HAVE_ASM_GOTO */
18923
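The reworked RMWcc macros thread a second mnemonic (the "antiop") through so the checked variants can detect signed overflow, undo the wrap, and trap. For the __preempt_count_dec_and_test() caller patched earlier, GEN_UNARY_RMWcc("decl", "incl", ...) expands along these lines (asm-goto variant, schematically):

	decl %gs:__preempt_count	# the real operation
	jno 0f				# no signed overflow: continue
	incl %gs:__preempt_count	# antiop: undo the wrapped result
	int $4				# raise #OF for the PaX refcount handler
0:	je %l[cc_label]			# original condition-code test ("e")

A rough C analogue of the same detect-undo-report semantics, with a GCC builtin standing in for the overflow flag and __builtin_trap() for the int $4 path; dec_and_test_checked is a name invented for this sketch:

#include <stdio.h>

static int dec_and_test_checked(int *v)
{
	int nv;

	if (__builtin_sub_overflow(*v, 1, &nv))	/* would wrap at INT_MIN? */
		__builtin_trap();		/* report instead of wrapping */
	*v = nv;
	return nv == 0;				/* the "e" condition test */
}

int main(void)
{
	int count = 1;

	printf("hit zero: %d\n", dec_and_test_checked(&count));
	return 0;
}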
18924diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18925index cad82c9..2e5c5c1 100644
18926--- a/arch/x86/include/asm/rwsem.h
18927+++ b/arch/x86/include/asm/rwsem.h
18928@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18929 {
18930 asm volatile("# beginning down_read\n\t"
18931 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18932+
18933+#ifdef CONFIG_PAX_REFCOUNT
18934+ "jno 0f\n"
18935+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18936+ "int $4\n0:\n"
18937+ _ASM_EXTABLE(0b, 0b)
18938+#endif
18939+
18940 /* adds 0x00000001 */
18941 " jns 1f\n"
18942 " call call_rwsem_down_read_failed\n"
18943@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18944 "1:\n\t"
18945 " mov %1,%2\n\t"
18946 " add %3,%2\n\t"
18947+
18948+#ifdef CONFIG_PAX_REFCOUNT
18949+ "jno 0f\n"
18950+ "sub %3,%2\n"
18951+ "int $4\n0:\n"
18952+ _ASM_EXTABLE(0b, 0b)
18953+#endif
18954+
18955 " jle 2f\n\t"
18956 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18957 " jnz 1b\n\t"
18958@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18959 long tmp;
18960 asm volatile("# beginning down_write\n\t"
18961 LOCK_PREFIX " xadd %1,(%2)\n\t"
18962+
18963+#ifdef CONFIG_PAX_REFCOUNT
18964+ "jno 0f\n"
18965+ "mov %1,(%2)\n"
18966+ "int $4\n0:\n"
18967+ _ASM_EXTABLE(0b, 0b)
18968+#endif
18969+
18970 /* adds 0xffff0001, returns the old value */
18971 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18972 /* was the active mask 0 before? */
18973@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18974 long tmp;
18975 asm volatile("# beginning __up_read\n\t"
18976 LOCK_PREFIX " xadd %1,(%2)\n\t"
18977+
18978+#ifdef CONFIG_PAX_REFCOUNT
18979+ "jno 0f\n"
18980+ "mov %1,(%2)\n"
18981+ "int $4\n0:\n"
18982+ _ASM_EXTABLE(0b, 0b)
18983+#endif
18984+
18985 /* subtracts 1, returns the old value */
18986 " jns 1f\n\t"
18987 " call call_rwsem_wake\n" /* expects old value in %edx */
18988@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18989 long tmp;
18990 asm volatile("# beginning __up_write\n\t"
18991 LOCK_PREFIX " xadd %1,(%2)\n\t"
18992+
18993+#ifdef CONFIG_PAX_REFCOUNT
18994+ "jno 0f\n"
18995+ "mov %1,(%2)\n"
18996+ "int $4\n0:\n"
18997+ _ASM_EXTABLE(0b, 0b)
18998+#endif
18999+
19000 /* subtracts 0xffff0001, returns the old value */
19001 " jns 1f\n\t"
19002 " call call_rwsem_wake\n" /* expects old value in %edx */
19003@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19004 {
19005 asm volatile("# beginning __downgrade_write\n\t"
19006 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19007+
19008+#ifdef CONFIG_PAX_REFCOUNT
19009+ "jno 0f\n"
19010+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19011+ "int $4\n0:\n"
19012+ _ASM_EXTABLE(0b, 0b)
19013+#endif
19014+
19015 /*
19016 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19017 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19018@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19019 */
19020 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19021 {
19022- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19023+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19024+
19025+#ifdef CONFIG_PAX_REFCOUNT
19026+ "jno 0f\n"
19027+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19028+ "int $4\n0:\n"
19029+ _ASM_EXTABLE(0b, 0b)
19030+#endif
19031+
19032 : "+m" (sem->count)
19033 : "er" (delta));
19034 }
19035@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19036 */
19037 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19038 {
19039- return delta + xadd(&sem->count, delta);
19040+ return delta + xadd_check_overflow(&sem->count, delta);
19041 }
19042
19043 #endif /* __KERNEL__ */
19044diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19045index 6f1c3a8..7744f19 100644
19046--- a/arch/x86/include/asm/segment.h
19047+++ b/arch/x86/include/asm/segment.h
19048@@ -64,10 +64,15 @@
19049 * 26 - ESPFIX small SS
19050 * 27 - per-cpu [ offset to per-cpu data area ]
19051 * 28 - stack_canary-20 [ for stack protector ]
19052- * 29 - unused
19053- * 30 - unused
19054+ * 29 - PCI BIOS CS
19055+ * 30 - PCI BIOS DS
19056 * 31 - TSS for double fault handler
19057 */
19058+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19059+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19060+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19061+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19062+
19063 #define GDT_ENTRY_TLS_MIN 6
19064 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19065
19066@@ -79,6 +84,8 @@
19067
19068 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19069
19070+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19071+
19072 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19073
19074 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19075@@ -104,6 +111,12 @@
19076 #define __KERNEL_STACK_CANARY 0
19077 #endif
19078
19079+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19080+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19081+
19082+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19083+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19084+
19085 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19086
19087 /*
19088@@ -141,7 +154,7 @@
19089 */
19090
19091 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19092-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19093+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19094
19095
19096 #else
19097@@ -165,6 +178,8 @@
19098 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19099 #define __USER32_DS __USER_DS
19100
19101+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19102+
19103 #define GDT_ENTRY_TSS 8 /* needs two entries */
19104 #define GDT_ENTRY_LDT 10 /* needs two entries */
19105 #define GDT_ENTRY_TLS_MIN 12
19106@@ -173,6 +188,8 @@
19107 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19108 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19109
19110+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19111+
19112 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19113 #define FS_TLS 0
19114 #define GS_TLS 1
19115@@ -180,12 +197,14 @@
19116 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19117 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19118
19119-#define GDT_ENTRIES 16
19120+#define GDT_ENTRIES 17
19121
19122 #endif
19123
19124 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19125+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19126 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19127+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19128 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19129 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19130 #ifndef CONFIG_PARAVIRT
19131@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19132 {
19133 unsigned long __limit;
19134 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19135- return __limit + 1;
19136+ return __limit;
19137 }
19138
19139 #endif /* !__ASSEMBLY__ */
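Selector values in this file follow the usual x86 rule: a selector is the GDT index shifted left by 3 (descriptors are 8 bytes) with the low two bits carrying the RPL, which is also why GDT_ENTRIES must grow from 16 to 17 once index 16 (the new UDEREF kernel DS) is in use — indices are 0-based, so a 16-entry table tops out at 15. A small sketch computing the new 64-bit selectors; SEL() is a helper defined only for this illustration:

#include <stdio.h>

#define SEL(index, rpl) (((index) << 3) | (rpl))

int main(void)
{
	/* 64-bit values from the hunk: */
	printf("__KERNEXEC_KERNEL_CS = %#x\n", SEL(7, 0));	/* 0x38 */
	printf("__UDEREF_KERNEL_DS   = %#x\n", SEL(16, 0));	/* 0x80 */
	return 0;
}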
19140diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19141index 8d3120f..352b440 100644
19142--- a/arch/x86/include/asm/smap.h
19143+++ b/arch/x86/include/asm/smap.h
19144@@ -25,11 +25,40 @@
19145
19146 #include <asm/alternative-asm.h>
19147
19148+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19149+#define ASM_PAX_OPEN_USERLAND \
19150+ 661: jmp 663f; \
19151+ .pushsection .altinstr_replacement, "a" ; \
19152+ 662: pushq %rax; nop; \
19153+ .popsection ; \
19154+ .pushsection .altinstructions, "a" ; \
19155+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19156+ .popsection ; \
19157+ call __pax_open_userland; \
19158+ popq %rax; \
19159+ 663:
19160+
19161+#define ASM_PAX_CLOSE_USERLAND \
19162+ 661: jmp 663f; \
19163+ .pushsection .altinstr_replacement, "a" ; \
19164+ 662: pushq %rax; nop; \
19165+ .popsection; \
19166+ .pushsection .altinstructions, "a" ; \
19167+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19168+ .popsection; \
19169+ call __pax_close_userland; \
19170+ popq %rax; \
19171+ 663:
19172+#else
19173+#define ASM_PAX_OPEN_USERLAND
19174+#define ASM_PAX_CLOSE_USERLAND
19175+#endif
19176+
19177 #ifdef CONFIG_X86_SMAP
19178
19179 #define ASM_CLAC \
19180 661: ASM_NOP3 ; \
19181- .pushsection .altinstr_replacement, "ax" ; \
19182+ .pushsection .altinstr_replacement, "a" ; \
19183 662: __ASM_CLAC ; \
19184 .popsection ; \
19185 .pushsection .altinstructions, "a" ; \
19186@@ -38,7 +67,7 @@
19187
19188 #define ASM_STAC \
19189 661: ASM_NOP3 ; \
19190- .pushsection .altinstr_replacement, "ax" ; \
19191+ .pushsection .altinstr_replacement, "a" ; \
19192 662: __ASM_STAC ; \
19193 .popsection ; \
19194 .pushsection .altinstructions, "a" ; \
19195@@ -56,6 +85,37 @@
19196
19197 #include <asm/alternative.h>
19198
19199+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19200+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19201+
19202+extern void __pax_open_userland(void);
19203+static __always_inline unsigned long pax_open_userland(void)
19204+{
19205+
19206+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19207+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19208+ :
19209+ : [open] "i" (__pax_open_userland)
19210+ : "memory", "rax");
19211+#endif
19212+
19213+ return 0;
19214+}
19215+
19216+extern void __pax_close_userland(void);
19217+static __always_inline unsigned long pax_close_userland(void)
19218+{
19219+
19220+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19221+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19222+ :
19223+ : [close] "i" (__pax_close_userland)
19224+ : "memory", "rax");
19225+#endif
19226+
19227+ return 0;
19228+}
19229+
19230 #ifdef CONFIG_X86_SMAP
19231
19232 static __always_inline void clac(void)
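The ASM_PAX_OPEN/CLOSE_USERLAND macros above ride the kernel's alternatives machinery: by default the site is a 2-byte jmp over the call sequence, and on CPUs with X86_FEATURE_STRONGUDEREF that jmp is patched to pushq %rax; nop, so execution falls through into the call/popq pair (the push/pop preserves %rax around the call). A user-space analogue of the same patch-once-at-boot idea, modeled with a function pointer since we cannot rewrite the instruction stream here; all names are stand-ins:

#include <stdio.h>

static void open_userland_noop(void) { }	/* the unpatched jmp-over path */
static void open_userland_real(void)		/* __pax_open_userland stand-in */
{
	puts("userland opened");
}

/* The kernel patches the code once at boot; we swap a pointer once. */
static void (*pax_open_userland_fn)(void) = open_userland_noop;

static void apply_alternatives(int cpu_has_stronguderef)
{
	if (cpu_has_stronguderef)
		pax_open_userland_fn = open_userland_real;
}

int main(void)
{
	apply_alternatives(1);		/* pretend the CPU feature is present */
	pax_open_userland_fn();		/* now reaches the real routine */
	return 0;
}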
19233diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19234index 8cd27e0..7f05ec8 100644
19235--- a/arch/x86/include/asm/smp.h
19236+++ b/arch/x86/include/asm/smp.h
19237@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19238 /* cpus sharing the last level cache: */
19239 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19240 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19241-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19242+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19243
19244 static inline struct cpumask *cpu_sibling_mask(int cpu)
19245 {
19246@@ -78,7 +78,7 @@ struct smp_ops {
19247
19248 void (*send_call_func_ipi)(const struct cpumask *mask);
19249 void (*send_call_func_single_ipi)(int cpu);
19250-};
19251+} __no_const;
19252
19253 /* Globals due to paravirt */
19254 extern void set_cpu_sibling_map(int cpu);
19255@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19256 extern int safe_smp_processor_id(void);
19257
19258 #elif defined(CONFIG_X86_64_SMP)
19259-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19260-
19261-#define stack_smp_processor_id() \
19262-({ \
19263- struct thread_info *ti; \
19264- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19265- ti->cpu; \
19266-})
19267+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19268+#define stack_smp_processor_id() raw_smp_processor_id()
19269 #define safe_smp_processor_id() smp_processor_id()
19270
19271 #endif
19272diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19273index 0f62f54..cb5d0dd 100644
19274--- a/arch/x86/include/asm/spinlock.h
19275+++ b/arch/x86/include/asm/spinlock.h
19276@@ -222,6 +222,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19277 static inline void arch_read_lock(arch_rwlock_t *rw)
19278 {
19279 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19280+
19281+#ifdef CONFIG_PAX_REFCOUNT
19282+ "jno 0f\n"
19283+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19284+ "int $4\n0:\n"
19285+ _ASM_EXTABLE(0b, 0b)
19286+#endif
19287+
19288 "jns 1f\n"
19289 "call __read_lock_failed\n\t"
19290 "1:\n"
19291@@ -231,6 +239,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19292 static inline void arch_write_lock(arch_rwlock_t *rw)
19293 {
19294 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19295+
19296+#ifdef CONFIG_PAX_REFCOUNT
19297+ "jno 0f\n"
19298+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19299+ "int $4\n0:\n"
19300+ _ASM_EXTABLE(0b, 0b)
19301+#endif
19302+
19303 "jz 1f\n"
19304 "call __write_lock_failed\n\t"
19305 "1:\n"
19306@@ -260,13 +276,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19307
19308 static inline void arch_read_unlock(arch_rwlock_t *rw)
19309 {
19310- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19311+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19312+
19313+#ifdef CONFIG_PAX_REFCOUNT
19314+ "jno 0f\n"
19315+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19316+ "int $4\n0:\n"
19317+ _ASM_EXTABLE(0b, 0b)
19318+#endif
19319+
19320 :"+m" (rw->lock) : : "memory");
19321 }
19322
19323 static inline void arch_write_unlock(arch_rwlock_t *rw)
19324 {
19325- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19326+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19327+
19328+#ifdef CONFIG_PAX_REFCOUNT
19329+ "jno 0f\n"
19330+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19331+ "int $4\n0:\n"
19332+ _ASM_EXTABLE(0b, 0b)
19333+#endif
19334+
19335 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
19336 }
19337
19338diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19339index 6a99859..03cb807 100644
19340--- a/arch/x86/include/asm/stackprotector.h
19341+++ b/arch/x86/include/asm/stackprotector.h
19342@@ -47,7 +47,7 @@
19343 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19344 */
19345 #define GDT_STACK_CANARY_INIT \
19346- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19347+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19348
19349 /*
19350 * Initialize the stackprotector canary value.
19351@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19352
19353 static inline void load_stack_canary_segment(void)
19354 {
19355-#ifdef CONFIG_X86_32
19356+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19357 asm volatile ("mov %0, %%gs" : : "r" (0));
19358 #endif
19359 }
19360diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19361index 70bbe39..4ae2bd4 100644
19362--- a/arch/x86/include/asm/stacktrace.h
19363+++ b/arch/x86/include/asm/stacktrace.h
19364@@ -11,28 +11,20 @@
19365
19366 extern int kstack_depth_to_print;
19367
19368-struct thread_info;
19369+struct task_struct;
19370 struct stacktrace_ops;
19371
19372-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19373- unsigned long *stack,
19374- unsigned long bp,
19375- const struct stacktrace_ops *ops,
19376- void *data,
19377- unsigned long *end,
19378- int *graph);
19379+typedef unsigned long walk_stack_t(struct task_struct *task,
19380+ void *stack_start,
19381+ unsigned long *stack,
19382+ unsigned long bp,
19383+ const struct stacktrace_ops *ops,
19384+ void *data,
19385+ unsigned long *end,
19386+ int *graph);
19387
19388-extern unsigned long
19389-print_context_stack(struct thread_info *tinfo,
19390- unsigned long *stack, unsigned long bp,
19391- const struct stacktrace_ops *ops, void *data,
19392- unsigned long *end, int *graph);
19393-
19394-extern unsigned long
19395-print_context_stack_bp(struct thread_info *tinfo,
19396- unsigned long *stack, unsigned long bp,
19397- const struct stacktrace_ops *ops, void *data,
19398- unsigned long *end, int *graph);
19399+extern walk_stack_t print_context_stack;
19400+extern walk_stack_t print_context_stack_bp;
19401
19402 /* Generic stack tracer with callbacks */
19403
19404@@ -40,7 +32,7 @@ struct stacktrace_ops {
19405 void (*address)(void *data, unsigned long address, int reliable);
19406 /* On negative return stop dumping */
19407 int (*stack)(void *data, char *name);
19408- walk_stack_t walk_stack;
19409+ walk_stack_t *walk_stack;
19410 };
19411
19412 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
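The stacktrace.h hunk turns walk_stack_t from a pointer typedef into a function typedef. A function typedef can declare the functions themselves (extern walk_stack_t print_context_stack;), which keeps every implementation's signature in lockstep with the struct field, now spelled with an explicit pointer as walk_stack_t *walk_stack. A self-contained illustration of the idiom, with names invented for the sketch:

typedef unsigned long walk_fn(int depth);	/* function type, not a pointer */

extern walk_fn walk_one;	/* declares: unsigned long walk_one(int); */

struct stacktrace_ops_stub {
	walk_fn *walk;		/* the pointer level is now explicit */
};

unsigned long walk_one(int depth)
{
	return (unsigned long)depth;
}

int main(void)
{
	struct stacktrace_ops_stub ops = { .walk = walk_one };

	return (int)ops.walk(0);
}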
19413diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19414index d7f3b3b..3cc39f1 100644
19415--- a/arch/x86/include/asm/switch_to.h
19416+++ b/arch/x86/include/asm/switch_to.h
19417@@ -108,7 +108,7 @@ do { \
19418 "call __switch_to\n\t" \
19419 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19420 __switch_canary \
19421- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19422+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19423 "movq %%rax,%%rdi\n\t" \
19424 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19425 "jnz ret_from_fork\n\t" \
19426@@ -119,7 +119,7 @@ do { \
19427 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19428 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19429 [_tif_fork] "i" (_TIF_FORK), \
19430- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19431+ [thread_info] "m" (current_tinfo), \
19432 [current_task] "m" (current_task) \
19433 __switch_canary_iparam \
19434 : "memory", "cc" __EXTRA_CLOBBER)
19435diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19436index e1940c0..ac50dd8 100644
19437--- a/arch/x86/include/asm/thread_info.h
19438+++ b/arch/x86/include/asm/thread_info.h
19439@@ -10,6 +10,7 @@
19440 #include <linux/compiler.h>
19441 #include <asm/page.h>
19442 #include <asm/types.h>
19443+#include <asm/percpu.h>
19444
19445 /*
19446 * low level task data that entry.S needs immediate access to
19447@@ -23,7 +24,6 @@ struct exec_domain;
19448 #include <linux/atomic.h>
19449
19450 struct thread_info {
19451- struct task_struct *task; /* main task structure */
19452 struct exec_domain *exec_domain; /* execution domain */
19453 __u32 flags; /* low level flags */
19454 __u32 status; /* thread synchronous flags */
19455@@ -32,19 +32,13 @@ struct thread_info {
19456 mm_segment_t addr_limit;
19457 struct restart_block restart_block;
19458 void __user *sysenter_return;
19459-#ifdef CONFIG_X86_32
19460- unsigned long previous_esp; /* ESP of the previous stack in
19461- case of nested (IRQ) stacks
19462- */
19463- __u8 supervisor_stack[0];
19464-#endif
19465+ unsigned long lowest_stack;
19466 unsigned int sig_on_uaccess_error:1;
19467 unsigned int uaccess_err:1; /* uaccess failed */
19468 };
19469
19470-#define INIT_THREAD_INFO(tsk) \
19471+#define INIT_THREAD_INFO \
19472 { \
19473- .task = &tsk, \
19474 .exec_domain = &default_exec_domain, \
19475 .flags = 0, \
19476 .cpu = 0, \
19477@@ -55,7 +49,7 @@ struct thread_info {
19478 }, \
19479 }
19480
19481-#define init_thread_info (init_thread_union.thread_info)
19482+#define init_thread_info (init_thread_union.stack)
19483 #define init_stack (init_thread_union.stack)
19484
19485 #else /* !__ASSEMBLY__ */
19486@@ -95,6 +89,7 @@ struct thread_info {
19487 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19488 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19489 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19490+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19491
19492 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19493 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19494@@ -118,17 +113,18 @@ struct thread_info {
19495 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19496 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19497 #define _TIF_X32 (1 << TIF_X32)
19498+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19499
19500 /* work to do in syscall_trace_enter() */
19501 #define _TIF_WORK_SYSCALL_ENTRY \
19502 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19503 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19504- _TIF_NOHZ)
19505+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19506
19507 /* work to do in syscall_trace_leave() */
19508 #define _TIF_WORK_SYSCALL_EXIT \
19509 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19510- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19511+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19512
19513 /* work to do on interrupt/exception return */
19514 #define _TIF_WORK_MASK \
19515@@ -139,7 +135,7 @@ struct thread_info {
19516 /* work to do on any return to user space */
19517 #define _TIF_ALLWORK_MASK \
19518 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19519- _TIF_NOHZ)
19520+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19521
19522 /* Only used for 64 bit */
19523 #define _TIF_DO_NOTIFY_MASK \
19524@@ -153,6 +149,23 @@ struct thread_info {
19525 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
19526 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19527
19528+#ifdef __ASSEMBLY__
19529+/* how to get the thread information struct from ASM */
19530+#define GET_THREAD_INFO(reg) \
19531+ mov PER_CPU_VAR(current_tinfo), reg
19532+
19533+/* use this one if reg already contains %esp */
19534+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
19535+#else
19536+/* how to get the thread information struct from C */
19537+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19538+
19539+static __always_inline struct thread_info *current_thread_info(void)
19540+{
19541+ return this_cpu_read_stable(current_tinfo);
19542+}
19543+#endif
19544+
19545 #ifdef CONFIG_X86_32
19546
19547 #define STACK_WARN (THREAD_SIZE/8)
19548@@ -169,31 +182,10 @@ struct thread_info {
19549 sp; \
19550 })
19551
19552-/* how to get the thread information struct from C */
19553-static inline struct thread_info *current_thread_info(void)
19554-{
19555- return (struct thread_info *)
19556- (current_stack_pointer & ~(THREAD_SIZE - 1));
19557-}
19558-
19559-#else /* !__ASSEMBLY__ */
19560-
19561-/* how to get the thread information struct from ASM */
19562-#define GET_THREAD_INFO(reg) \
19563- movl $-THREAD_SIZE, reg; \
19564- andl %esp, reg
19565-
19566-/* use this one if reg already contains %esp */
19567-#define GET_THREAD_INFO_WITH_ESP(reg) \
19568- andl $-THREAD_SIZE, reg
19569-
19570 #endif
19571
19572 #else /* X86_32 */
19573
19574-#include <asm/percpu.h>
19575-#define KERNEL_STACK_OFFSET (5*8)
19576-
19577 /*
19578 * macros/functions for gaining access to the thread information structure
19579 * preempt_count needs to be 1 initially, until the scheduler is functional.
19580@@ -201,27 +193,8 @@ static inline struct thread_info *current_thread_info(void)
19581 #ifndef __ASSEMBLY__
19582 DECLARE_PER_CPU(unsigned long, kernel_stack);
19583
19584-static inline struct thread_info *current_thread_info(void)
19585-{
19586- struct thread_info *ti;
19587- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19588- KERNEL_STACK_OFFSET - THREAD_SIZE);
19589- return ti;
19590-}
19591-
19592-#else /* !__ASSEMBLY__ */
19593-
19594-/* how to get the thread information struct from ASM */
19595-#define GET_THREAD_INFO(reg) \
19596- movq PER_CPU_VAR(kernel_stack),reg ; \
19597- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
19598-
19599-/*
19600- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19601- * a certain register (to be used in assembler memory operands).
19602- */
19603-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19604-
19605+/* how to get the current stack pointer from C */
19606+register unsigned long current_stack_pointer asm("rsp") __used;
19607 #endif
19608
19609 #endif /* !X86_32 */
19610@@ -280,5 +253,12 @@ static inline bool is_ia32_task(void)
19611 extern void arch_task_cache_init(void);
19612 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19613 extern void arch_release_task_struct(struct task_struct *tsk);
19614+
19615+#define __HAVE_THREAD_FUNCTIONS
19616+#define task_thread_info(task) (&(task)->tinfo)
19617+#define task_stack_page(task) ((task)->stack)
19618+#define setup_thread_stack(p, org) do {} while (0)
19619+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19620+
19621 #endif
19622 #endif /* _ASM_X86_THREAD_INFO_H */
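The thread_info.h changes detach thread_info from the kernel stack: instead of masking the stack pointer (the deleted GET_THREAD_INFO andl %esp trick), current_thread_info() becomes a single per-CPU load, and __HAVE_THREAD_FUNCTIONS redirects task_thread_info() into task_struct itself. One practical effect is that a stack overflow can no longer trample thread_info. A user-space analogue, with __thread standing in for PER_CPU storage and a stub struct in place of the real thread_info:

#include <stdio.h>

struct tinfo_stub { unsigned int flags; };

static __thread struct tinfo_stub *current_tinfo;	/* models PER_CPU(current_tinfo) */

static inline struct tinfo_stub *current_thread_info_stub(void)
{
	return current_tinfo;	/* one load; no dependence on stack layout */
}

int main(void)
{
	struct tinfo_stub ti = { .flags = 0 };

	current_tinfo = &ti;	/* the kernel updates this on context switch */
	printf("flags=%u\n", current_thread_info_stub()->flags);
	return 0;
}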
19623diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19624index 04905bf..49203ca 100644
19625--- a/arch/x86/include/asm/tlbflush.h
19626+++ b/arch/x86/include/asm/tlbflush.h
19627@@ -17,18 +17,44 @@
19628
19629 static inline void __native_flush_tlb(void)
19630 {
19631+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19632+ u64 descriptor[2];
19633+
19634+ descriptor[0] = PCID_KERNEL;
19635+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
19636+ return;
19637+ }
19638+
19639+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19640+ if (static_cpu_has(X86_FEATURE_PCID)) {
19641+ unsigned int cpu = raw_get_cpu();
19642+
19643+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19644+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19645+ raw_put_cpu_no_resched();
19646+ return;
19647+ }
19648+#endif
19649+
19650 native_write_cr3(native_read_cr3());
19651 }
19652
19653 static inline void __native_flush_tlb_global_irq_disabled(void)
19654 {
19655- unsigned long cr4;
19656+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19657+ u64 descriptor[2];
19658
19659- cr4 = native_read_cr4();
19660- /* clear PGE */
19661- native_write_cr4(cr4 & ~X86_CR4_PGE);
19662- /* write old PGE again and flush TLBs */
19663- native_write_cr4(cr4);
19664+ descriptor[0] = PCID_KERNEL;
19665+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19666+ } else {
19667+ unsigned long cr4;
19668+
19669+ cr4 = native_read_cr4();
19670+ /* clear PGE */
19671+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19672+ /* write old PGE again and flush TLBs */
19673+ native_write_cr4(cr4);
19674+ }
19675 }
19676
19677 static inline void __native_flush_tlb_global(void)
19678@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19679
19680 static inline void __native_flush_tlb_single(unsigned long addr)
19681 {
19682+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19683+ u64 descriptor[2];
19684+
19685+ descriptor[0] = PCID_KERNEL;
19686+ descriptor[1] = addr;
19687+
19688+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19689+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19690+ if (addr < TASK_SIZE_MAX)
19691+ descriptor[1] += pax_user_shadow_base;
19692+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19693+ }
19694+
19695+ descriptor[0] = PCID_USER;
19696+ descriptor[1] = addr;
19697+#endif
19698+
19699+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19700+ return;
19701+ }
19702+
19703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19704+ if (static_cpu_has(X86_FEATURE_PCID)) {
19705+ unsigned int cpu = raw_get_cpu();
19706+
19707+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19708+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19709+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19710+ raw_put_cpu_no_resched();
19711+
19712+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19713+ addr += pax_user_shadow_base;
19714+ }
19715+#endif
19716+
19717 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19718 }
19719
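The INVPCID paths above pass the invalidation type in %rax and a pointer to a 16-byte descriptor in %rdx, matching the .byte-encoded __ASM_INVPCID from processor.h (the mnemonic predates the assemblers of the era). A sketch of the descriptor layout and calling convention; the hunk's INVPCID_ALL_MONGLOBAL corresponds to type 3 (all non-global), and since invpcid is privileged it will #GP outside ring 0, so this only illustrates the encoding:

#include <stdint.h>

struct invpcid_desc {
	uint64_t pcid;		/* only bits 0-11 are architecturally used */
	uint64_t addr;		/* consulted by type 0 (single address) only */
};

enum invpcid_type {
	INVPCID_SINGLE_ADDRESS = 0,	/* one (PCID, address) pair */
	INVPCID_SINGLE_CONTEXT = 1,	/* all entries tagged with PCID */
	INVPCID_ALL_GLOBAL     = 2,	/* everything, global pages included */
	INVPCID_ALL_NONGLOBAL  = 3,	/* everything except global pages */
};

static inline void invpcid(enum invpcid_type type, uint64_t pcid, uint64_t addr)
{
	struct invpcid_desc desc = { .pcid = pcid, .addr = addr };

	/* Same .byte encoding as __ASM_INVPCID: invpcid (%rdx),%rax */
	asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
		     : : "d"(&desc), "a"((uint64_t)type) : "memory");
}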
19720diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19721index 0d592e0..7437fcc 100644
19722--- a/arch/x86/include/asm/uaccess.h
19723+++ b/arch/x86/include/asm/uaccess.h
19724@@ -7,6 +7,7 @@
19725 #include <linux/compiler.h>
19726 #include <linux/thread_info.h>
19727 #include <linux/string.h>
19728+#include <linux/spinlock.h>
19729 #include <asm/asm.h>
19730 #include <asm/page.h>
19731 #include <asm/smap.h>
19732@@ -29,7 +30,12 @@
19733
19734 #define get_ds() (KERNEL_DS)
19735 #define get_fs() (current_thread_info()->addr_limit)
19736+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737+void __set_fs(mm_segment_t x);
19738+void set_fs(mm_segment_t x);
19739+#else
19740 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19741+#endif
19742
19743 #define segment_eq(a, b) ((a).seg == (b).seg)
19744
19745@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19746 * checks that the pointer is in the user space range - after calling
19747 * this function, memory access functions may still return -EFAULT.
19748 */
19749-#define access_ok(type, addr, size) \
19750- likely(!__range_not_ok(addr, size, user_addr_max()))
19751+extern int _cond_resched(void);
19752+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19753+#define access_ok(type, addr, size) \
19754+({ \
19755+ unsigned long __size = size; \
19756+ unsigned long __addr = (unsigned long)addr; \
19757+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19758+ if (__ret_ao && __size) { \
19759+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19760+ unsigned long __end_ao = __addr + __size - 1; \
19761+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19762+ while (__addr_ao <= __end_ao) { \
19763+ char __c_ao; \
19764+ __addr_ao += PAGE_SIZE; \
19765+ if (__size > PAGE_SIZE) \
19766+ _cond_resched(); \
19767+ if (__get_user(__c_ao, (char __user *)__addr)) \
19768+ break; \
19769+ if (type != VERIFY_WRITE) { \
19770+ __addr = __addr_ao; \
19771+ continue; \
19772+ } \
19773+ if (__put_user(__c_ao, (char __user *)__addr)) \
19774+ break; \
19775+ __addr = __addr_ao; \
19776+ } \
19777+ } \
19778+ } \
19779+ __ret_ao; \
19780+})
19781
19782 /*
19783 * The exception table consists of pairs of addresses relative to the
19784@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19785 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19786 __chk_user_ptr(ptr); \
19787 might_fault(); \
19788+ pax_open_userland(); \
19789 asm volatile("call __get_user_%P3" \
19790 : "=a" (__ret_gu), "=r" (__val_gu) \
19791 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19792 (x) = (__typeof__(*(ptr))) __val_gu; \
19793+ pax_close_userland(); \
19794 __ret_gu; \
19795 })
19796
19797@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19798 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19799 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19800
19801-
19802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19803+#define __copyuser_seg "gs;"
19804+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19805+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19806+#else
19807+#define __copyuser_seg
19808+#define __COPYUSER_SET_ES
19809+#define __COPYUSER_RESTORE_ES
19810+#endif
19811
19812 #ifdef CONFIG_X86_32
19813 #define __put_user_asm_u64(x, addr, err, errret) \
19814 asm volatile(ASM_STAC "\n" \
19815- "1: movl %%eax,0(%2)\n" \
19816- "2: movl %%edx,4(%2)\n" \
19817+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19818+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19819 "3: " ASM_CLAC "\n" \
19820 ".section .fixup,\"ax\"\n" \
19821 "4: movl %3,%0\n" \
19822@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19823
19824 #define __put_user_asm_ex_u64(x, addr) \
19825 asm volatile(ASM_STAC "\n" \
19826- "1: movl %%eax,0(%1)\n" \
19827- "2: movl %%edx,4(%1)\n" \
19828+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19829+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19830 "3: " ASM_CLAC "\n" \
19831 _ASM_EXTABLE_EX(1b, 2b) \
19832 _ASM_EXTABLE_EX(2b, 3b) \
19833@@ -257,7 +301,8 @@ extern void __put_user_8(void);
19834 __typeof__(*(ptr)) __pu_val; \
19835 __chk_user_ptr(ptr); \
19836 might_fault(); \
19837- __pu_val = x; \
19838+ __pu_val = (x); \
19839+ pax_open_userland(); \
19840 switch (sizeof(*(ptr))) { \
19841 case 1: \
19842 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19843@@ -275,6 +320,7 @@ extern void __put_user_8(void);
19844 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19845 break; \
19846 } \
19847+ pax_close_userland(); \
19848 __ret_pu; \
19849 })
19850
19851@@ -355,8 +401,10 @@ do { \
19852 } while (0)
19853
19854 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19855+do { \
19856+ pax_open_userland(); \
19857 asm volatile(ASM_STAC "\n" \
19858- "1: mov"itype" %2,%"rtype"1\n" \
19859+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19860 "2: " ASM_CLAC "\n" \
19861 ".section .fixup,\"ax\"\n" \
19862 "3: mov %3,%0\n" \
19863@@ -364,8 +412,10 @@ do { \
19864 " jmp 2b\n" \
19865 ".previous\n" \
19866 _ASM_EXTABLE(1b, 3b) \
19867- : "=r" (err), ltype(x) \
19868- : "m" (__m(addr)), "i" (errret), "0" (err))
19869+ : "=r" (err), ltype (x) \
19870+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19871+ pax_close_userland(); \
19872+} while (0)
19873
19874 #define __get_user_size_ex(x, ptr, size) \
19875 do { \
19876@@ -389,7 +439,7 @@ do { \
19877 } while (0)
19878
19879 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19880- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19881+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19882 "2:\n" \
19883 _ASM_EXTABLE_EX(1b, 2b) \
19884 : ltype(x) : "m" (__m(addr)))
19885@@ -406,13 +456,24 @@ do { \
19886 int __gu_err; \
19887 unsigned long __gu_val; \
19888 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19889- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19890+ (x) = (__typeof__(*(ptr)))__gu_val; \
19891 __gu_err; \
19892 })
19893
19894 /* FIXME: this hack is definitely wrong -AK */
19895 struct __large_struct { unsigned long buf[100]; };
19896-#define __m(x) (*(struct __large_struct __user *)(x))
19897+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19898+#define ____m(x) \
19899+({ \
19900+ unsigned long ____x = (unsigned long)(x); \
19901+ if (____x < pax_user_shadow_base) \
19902+ ____x += pax_user_shadow_base; \
19903+ (typeof(x))____x; \
19904+})
19905+#else
19906+#define ____m(x) (x)
19907+#endif
19908+#define __m(x) (*(struct __large_struct __user *)____m(x))
19909
19910 /*
19911 * Tell gcc we read from memory instead of writing: this is because
19912@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
19913 * aliasing issues.
19914 */
19915 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19916+do { \
19917+ pax_open_userland(); \
19918 asm volatile(ASM_STAC "\n" \
19919- "1: mov"itype" %"rtype"1,%2\n" \
19920+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19921 "2: " ASM_CLAC "\n" \
19922 ".section .fixup,\"ax\"\n" \
19923 "3: mov %3,%0\n" \
19924@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
19925 ".previous\n" \
19926 _ASM_EXTABLE(1b, 3b) \
19927 : "=r"(err) \
19928- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19929+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19930+ pax_close_userland(); \
19931+} while (0)
19932
19933 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19934- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19935+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19936 "2:\n" \
19937 _ASM_EXTABLE_EX(1b, 2b) \
19938 : : ltype(x), "m" (__m(addr)))
19939@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
19940 */
19941 #define uaccess_try do { \
19942 current_thread_info()->uaccess_err = 0; \
19943+ pax_open_userland(); \
19944 stac(); \
19945 barrier();
19946
19947 #define uaccess_catch(err) \
19948 clac(); \
19949+ pax_close_userland(); \
19950 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19951 } while (0)
19952
19953@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
19954 * On error, the variable @x is set to zero.
19955 */
19956
19957+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19958+#define __get_user(x, ptr) get_user((x), (ptr))
19959+#else
19960 #define __get_user(x, ptr) \
19961 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19962+#endif
19963
19964 /**
19965 * __put_user: - Write a simple value into user space, with less checking.
19966@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
19967 * Returns zero on success, or -EFAULT on error.
19968 */
19969
19970+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19971+#define __put_user(x, ptr) put_user((x), (ptr))
19972+#else
19973 #define __put_user(x, ptr) \
19974 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19975+#endif
19976
19977 #define __get_user_unaligned __get_user
19978 #define __put_user_unaligned __put_user
19979@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
19980 #define get_user_ex(x, ptr) do { \
19981 unsigned long __gue_val; \
19982 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19983- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19984+ (x) = (__typeof__(*(ptr)))__gue_val; \
19985 } while (0)
19986
19987 #define put_user_try uaccess_try
19988@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
19989 __typeof__(ptr) __uval = (uval); \
19990 __typeof__(*(ptr)) __old = (old); \
19991 __typeof__(*(ptr)) __new = (new); \
19992+ pax_open_userland(); \
19993 switch (size) { \
19994 case 1: \
19995 { \
19996 asm volatile("\t" ASM_STAC "\n" \
19997- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19998+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19999 "2:\t" ASM_CLAC "\n" \
20000 "\t.section .fixup, \"ax\"\n" \
20001 "3:\tmov %3, %0\n" \
20002 "\tjmp 2b\n" \
20003 "\t.previous\n" \
20004 _ASM_EXTABLE(1b, 3b) \
20005- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20006+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20007 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20008 : "memory" \
20009 ); \
20010@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20011 case 2: \
20012 { \
20013 asm volatile("\t" ASM_STAC "\n" \
20014- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20015+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20016 "2:\t" ASM_CLAC "\n" \
20017 "\t.section .fixup, \"ax\"\n" \
20018 "3:\tmov %3, %0\n" \
20019 "\tjmp 2b\n" \
20020 "\t.previous\n" \
20021 _ASM_EXTABLE(1b, 3b) \
20022- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20023+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20024 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20025 : "memory" \
20026 ); \
20027@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20028 case 4: \
20029 { \
20030 asm volatile("\t" ASM_STAC "\n" \
20031- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20032+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20033 "2:\t" ASM_CLAC "\n" \
20034 "\t.section .fixup, \"ax\"\n" \
20035 "3:\tmov %3, %0\n" \
20036 "\tjmp 2b\n" \
20037 "\t.previous\n" \
20038 _ASM_EXTABLE(1b, 3b) \
20039- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20040+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20041 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20042 : "memory" \
20043 ); \
20044@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20045 __cmpxchg_wrong_size(); \
20046 \
20047 asm volatile("\t" ASM_STAC "\n" \
20048- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20049+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20050 "2:\t" ASM_CLAC "\n" \
20051 "\t.section .fixup, \"ax\"\n" \
20052 "3:\tmov %3, %0\n" \
20053 "\tjmp 2b\n" \
20054 "\t.previous\n" \
20055 _ASM_EXTABLE(1b, 3b) \
20056- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20057+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20058 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20059 : "memory" \
20060 ); \
20061@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20062 default: \
20063 __cmpxchg_wrong_size(); \
20064 } \
20065+ pax_close_userland(); \
20066 *__uval = __old; \
20067 __ret; \
20068 })
20069@@ -636,17 +713,6 @@ extern struct movsl_mask {
20070
20071 #define ARCH_HAS_NOCACHE_UACCESS 1
20072
20073-#ifdef CONFIG_X86_32
20074-# include <asm/uaccess_32.h>
20075-#else
20076-# include <asm/uaccess_64.h>
20077-#endif
20078-
20079-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20080- unsigned n);
20081-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20082- unsigned n);
20083-
20084 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20085 # define copy_user_diag __compiletime_error
20086 #else
20087@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20088 extern void copy_user_diag("copy_from_user() buffer size is too small")
20089 copy_from_user_overflow(void);
20090 extern void copy_user_diag("copy_to_user() buffer size is too small")
20091-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20092+copy_to_user_overflow(void);
20093
20094 #undef copy_user_diag
20095
20096@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20097
20098 extern void
20099 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20100-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20101+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20102 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20103
20104 #else
20105@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20106
20107 #endif
20108
20109+#ifdef CONFIG_X86_32
20110+# include <asm/uaccess_32.h>
20111+#else
20112+# include <asm/uaccess_64.h>
20113+#endif
20114+
20115 static inline unsigned long __must_check
20116 copy_from_user(void *to, const void __user *from, unsigned long n)
20117 {
20118- int sz = __compiletime_object_size(to);
20119+ size_t sz = __compiletime_object_size(to);
20120
20121 might_fault();
20122
20123@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20124 * case, and do only runtime checking for non-constant sizes.
20125 */
20126
20127- if (likely(sz < 0 || sz >= n))
20128- n = _copy_from_user(to, from, n);
20129- else if(__builtin_constant_p(n))
20130- copy_from_user_overflow();
20131- else
20132- __copy_from_user_overflow(sz, n);
20133+ if (likely(sz != (size_t)-1 && sz < n)) {
20134+ if(__builtin_constant_p(n))
20135+ copy_from_user_overflow();
20136+ else
20137+ __copy_from_user_overflow(sz, n);
20138+	} else if (access_ok(VERIFY_READ, from, n))
20139+ n = __copy_from_user(to, from, n);
20140+ else if ((long)n > 0)
20141+ memset(to, 0, n);
20142
20143 return n;
20144 }
20145@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20146 static inline unsigned long __must_check
20147 copy_to_user(void __user *to, const void *from, unsigned long n)
20148 {
20149- int sz = __compiletime_object_size(from);
20150+ size_t sz = __compiletime_object_size(from);
20151
20152 might_fault();
20153
20154 /* See the comment in copy_from_user() above. */
20155- if (likely(sz < 0 || sz >= n))
20156- n = _copy_to_user(to, from, n);
20157- else if(__builtin_constant_p(n))
20158- copy_to_user_overflow();
20159- else
20160- __copy_to_user_overflow(sz, n);
20161+ if (likely(sz != (size_t)-1 && sz < n)) {
20162+ if(__builtin_constant_p(n))
20163+ copy_to_user_overflow();
20164+ else
20165+ __copy_to_user_overflow(sz, n);
20166+ } else if (access_ok(VERIFY_WRITE, to, n))
20167+ n = __copy_to_user(to, from, n);
20168
20169 return n;
20170 }
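The reworked copy_from_user()/copy_to_user() above fold three defenses into the inline wrapper: the old signed "sz < 0" convention becomes an explicit (size_t)-1 sentinel from __compiletime_object_size(), a provably undersized destination triggers the overflow diagnostics instead of the copy, and a rejected access_ok() zeroes the kernel buffer so stale data cannot leak through a partially validated read. A minimal user-space sketch of the same decision logic; access_ok_sim(), raw_copy_sim() and copy_from_user_sim() are hypothetical stand-ins, not the patch's code:

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE_UNKNOWN ((size_t)-1)   /* __compiletime_object_size() sentinel */

    /* hypothetical stand-ins for access_ok() / __copy_from_user() */
    static int access_ok_sim(const void *from, unsigned long n) { return from != NULL; }
    static unsigned long raw_copy_sim(void *to, const void *from, unsigned long n)
    { memcpy(to, from, n); return 0; }

    static unsigned long copy_from_user_sim(void *to, const void *from,
                                            unsigned long n, size_t sz)
    {
            if (sz != OBJ_SIZE_UNKNOWN && sz < n)   /* destination provably too small */
                    return n;                       /* diagnose, refuse the copy */
            if (access_ok_sim(from, n))
                    return raw_copy_sim(to, from, n);
            if ((long)n > 0)                        /* rejected: scrub the buffer */
                    memset(to, 0, n);
            return n;                               /* non-zero == bytes not copied */
    }

    int main(void)
    {
            char buf[8] = "stale!!";
            printf("left uncopied: %lu, buf zeroed: %d\n",
                   copy_from_user_sim(buf, NULL, sizeof(buf), OBJ_SIZE_UNKNOWN),
                   buf[0] == 0);
            return 0;
    }

Compiled as-is, the program reports 8 bytes refused and a zeroed buffer, mirroring the error path above.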
20171diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20172index 3c03a5d..1071638 100644
20173--- a/arch/x86/include/asm/uaccess_32.h
20174+++ b/arch/x86/include/asm/uaccess_32.h
20175@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20176 static __always_inline unsigned long __must_check
20177 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20178 {
20179+ if ((long)n < 0)
20180+ return n;
20181+
20182+ check_object_size(from, n, true);
20183+
20184 if (__builtin_constant_p(n)) {
20185 unsigned long ret;
20186
20187@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20188 __copy_to_user(void __user *to, const void *from, unsigned long n)
20189 {
20190 might_fault();
20191+
20192 return __copy_to_user_inatomic(to, from, n);
20193 }
20194
20195 static __always_inline unsigned long
20196 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20197 {
20198+ if ((long)n < 0)
20199+ return n;
20200+
20201 /* Avoid zeroing the tail if the copy fails..
20202 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20203 * but as the zeroing behaviour is only significant when n is not
20204@@ -137,6 +146,12 @@ static __always_inline unsigned long
20205 __copy_from_user(void *to, const void __user *from, unsigned long n)
20206 {
20207 might_fault();
20208+
20209+ if ((long)n < 0)
20210+ return n;
20211+
20212+ check_object_size(to, n, false);
20213+
20214 if (__builtin_constant_p(n)) {
20215 unsigned long ret;
20216
20217@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20218 const void __user *from, unsigned long n)
20219 {
20220 might_fault();
20221+
20222+ if ((long)n < 0)
20223+ return n;
20224+
20225 if (__builtin_constant_p(n)) {
20226 unsigned long ret;
20227
20228@@ -181,7 +200,10 @@ static __always_inline unsigned long
20229 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20230 unsigned long n)
20231 {
20232- return __copy_from_user_ll_nocache_nozero(to, from, n);
20233+ if ((long)n < 0)
20234+ return n;
20235+
20236+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20237 }
20238
20239 #endif /* _ASM_X86_UACCESS_32_H */
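Each 32-bit __copy_* helper above now starts with "if ((long)n < 0) return n;". Reinterpreting the unsigned length as signed rejects any request of 2 GB or more in a single comparison; on i386 such a value can only come from a wrapped or attacker-influenced size computation, and returning n reports every byte as uncopied. A small illustration, with checked_copy() as a hypothetical stand-in:

    #include <stdio.h>

    /* Reject "negative" (>= 2^31 on 32-bit) lengths before touching anything. */
    static unsigned long checked_copy(void *to, const void *from, unsigned long n)
    {
            (void)to; (void)from;
            if ((long)n < 0)        /* wrapped subtraction or bogus user size */
                    return n;       /* report every byte as uncopied */
            /* ... the real copy would happen here ... */
            return 0;
    }

    int main(void)
    {
            unsigned long bad = 0UL - 16UL; /* e.g. end - start with end < start */
            printf("%lu bytes refused\n", checked_copy(0, 0, bad));
            return 0;
    }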
20240diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20241index 12a26b9..206c200 100644
20242--- a/arch/x86/include/asm/uaccess_64.h
20243+++ b/arch/x86/include/asm/uaccess_64.h
20244@@ -10,6 +10,9 @@
20245 #include <asm/alternative.h>
20246 #include <asm/cpufeature.h>
20247 #include <asm/page.h>
20248+#include <asm/pgtable.h>
20249+
20250+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20251
20252 /*
20253 * Copy To/From Userspace
20254@@ -17,14 +20,14 @@
20255
20256 /* Handles exceptions in both to and from, but doesn't do access_ok */
20257 __must_check unsigned long
20258-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20259+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20260 __must_check unsigned long
20261-copy_user_generic_string(void *to, const void *from, unsigned len);
20262+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20263 __must_check unsigned long
20264-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20265+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20266
20267 static __always_inline __must_check unsigned long
20268-copy_user_generic(void *to, const void *from, unsigned len)
20269+copy_user_generic(void *to, const void *from, unsigned long len)
20270 {
20271 unsigned ret;
20272
20273@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20274 }
20275
20276 __must_check unsigned long
20277-copy_in_user(void __user *to, const void __user *from, unsigned len);
20278+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20279
20280 static __always_inline __must_check
20281-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20282+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20283 {
20284- int ret = 0;
20285+ size_t sz = __compiletime_object_size(dst);
20286+ unsigned ret = 0;
20287+
20288+ if (size > INT_MAX)
20289+ return size;
20290+
20291+ check_object_size(dst, size, false);
20292+
20293+#ifdef CONFIG_PAX_MEMORY_UDEREF
20294+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20295+ return size;
20296+#endif
20297+
20298+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20299+ if(__builtin_constant_p(size))
20300+ copy_from_user_overflow();
20301+ else
20302+ __copy_from_user_overflow(sz, size);
20303+ return size;
20304+ }
20305
20306 if (!__builtin_constant_p(size))
20307- return copy_user_generic(dst, (__force void *)src, size);
20308+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20309 switch (size) {
20310- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20311+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20312 ret, "b", "b", "=q", 1);
20313 return ret;
20314- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20315+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20316 ret, "w", "w", "=r", 2);
20317 return ret;
20318- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20319+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20320 ret, "l", "k", "=r", 4);
20321 return ret;
20322- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20323+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20324 ret, "q", "", "=r", 8);
20325 return ret;
20326 case 10:
20327- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20328+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20329 ret, "q", "", "=r", 10);
20330 if (unlikely(ret))
20331 return ret;
20332 __get_user_asm(*(u16 *)(8 + (char *)dst),
20333- (u16 __user *)(8 + (char __user *)src),
20334+ (const u16 __user *)(8 + (const char __user *)src),
20335 ret, "w", "w", "=r", 2);
20336 return ret;
20337 case 16:
20338- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20339+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20340 ret, "q", "", "=r", 16);
20341 if (unlikely(ret))
20342 return ret;
20343 __get_user_asm(*(u64 *)(8 + (char *)dst),
20344- (u64 __user *)(8 + (char __user *)src),
20345+ (const u64 __user *)(8 + (const char __user *)src),
20346 ret, "q", "", "=r", 8);
20347 return ret;
20348 default:
20349- return copy_user_generic(dst, (__force void *)src, size);
20350+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20351 }
20352 }
20353
20354 static __always_inline __must_check
20355-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20356+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20357 {
20358 might_fault();
20359 return __copy_from_user_nocheck(dst, src, size);
20360 }
20361
20362 static __always_inline __must_check
20363-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20364+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20365 {
20366- int ret = 0;
20367+ size_t sz = __compiletime_object_size(src);
20368+ unsigned ret = 0;
20369+
20370+ if (size > INT_MAX)
20371+ return size;
20372+
20373+ check_object_size(src, size, true);
20374+
20375+#ifdef CONFIG_PAX_MEMORY_UDEREF
20376+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20377+ return size;
20378+#endif
20379+
20380+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20381+ if(__builtin_constant_p(size))
20382+ copy_to_user_overflow();
20383+ else
20384+ __copy_to_user_overflow(sz, size);
20385+ return size;
20386+ }
20387
20388 if (!__builtin_constant_p(size))
20389- return copy_user_generic((__force void *)dst, src, size);
20390+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20391 switch (size) {
20392- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20393+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20394 ret, "b", "b", "iq", 1);
20395 return ret;
20396- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20397+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20398 ret, "w", "w", "ir", 2);
20399 return ret;
20400- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20401+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20402 ret, "l", "k", "ir", 4);
20403 return ret;
20404- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20405+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20406 ret, "q", "", "er", 8);
20407 return ret;
20408 case 10:
20409- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20410+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20411 ret, "q", "", "er", 10);
20412 if (unlikely(ret))
20413 return ret;
20414 asm("":::"memory");
20415- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20416+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20417 ret, "w", "w", "ir", 2);
20418 return ret;
20419 case 16:
20420- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20421+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20422 ret, "q", "", "er", 16);
20423 if (unlikely(ret))
20424 return ret;
20425 asm("":::"memory");
20426- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20427+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20428 ret, "q", "", "er", 8);
20429 return ret;
20430 default:
20431- return copy_user_generic((__force void *)dst, src, size);
20432+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20433 }
20434 }
20435
20436 static __always_inline __must_check
20437-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20438+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20439 {
20440 might_fault();
20441 return __copy_to_user_nocheck(dst, src, size);
20442 }
20443
20444 static __always_inline __must_check
20445-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20446+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20447 {
20448- int ret = 0;
20449+ unsigned ret = 0;
20450
20451 might_fault();
20452+
20453+ if (size > INT_MAX)
20454+ return size;
20455+
20456+#ifdef CONFIG_PAX_MEMORY_UDEREF
20457+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20458+ return size;
20459+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20460+ return size;
20461+#endif
20462+
20463 if (!__builtin_constant_p(size))
20464- return copy_user_generic((__force void *)dst,
20465- (__force void *)src, size);
20466+ return copy_user_generic((__force_kernel void *)____m(dst),
20467+ (__force_kernel const void *)____m(src), size);
20468 switch (size) {
20469 case 1: {
20470 u8 tmp;
20471- __get_user_asm(tmp, (u8 __user *)src,
20472+ __get_user_asm(tmp, (const u8 __user *)src,
20473 ret, "b", "b", "=q", 1);
20474 if (likely(!ret))
20475 __put_user_asm(tmp, (u8 __user *)dst,
20476@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20477 }
20478 case 2: {
20479 u16 tmp;
20480- __get_user_asm(tmp, (u16 __user *)src,
20481+ __get_user_asm(tmp, (const u16 __user *)src,
20482 ret, "w", "w", "=r", 2);
20483 if (likely(!ret))
20484 __put_user_asm(tmp, (u16 __user *)dst,
20485@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20486
20487 case 4: {
20488 u32 tmp;
20489- __get_user_asm(tmp, (u32 __user *)src,
20490+ __get_user_asm(tmp, (const u32 __user *)src,
20491 ret, "l", "k", "=r", 4);
20492 if (likely(!ret))
20493 __put_user_asm(tmp, (u32 __user *)dst,
20494@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20495 }
20496 case 8: {
20497 u64 tmp;
20498- __get_user_asm(tmp, (u64 __user *)src,
20499+ __get_user_asm(tmp, (const u64 __user *)src,
20500 ret, "q", "", "=r", 8);
20501 if (likely(!ret))
20502 __put_user_asm(tmp, (u64 __user *)dst,
20503@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20504 return ret;
20505 }
20506 default:
20507- return copy_user_generic((__force void *)dst,
20508- (__force void *)src, size);
20509+ return copy_user_generic((__force_kernel void *)____m(dst),
20510+ (__force_kernel const void *)____m(src), size);
20511 }
20512 }
20513
20514-static __must_check __always_inline int
20515-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20516+static __must_check __always_inline unsigned long
20517+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20518 {
20519 return __copy_from_user_nocheck(dst, src, size);
20520 }
20521
20522-static __must_check __always_inline int
20523-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20524+static __must_check __always_inline unsigned long
20525+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20526 {
20527 return __copy_to_user_nocheck(dst, src, size);
20528 }
20529
20530-extern long __copy_user_nocache(void *dst, const void __user *src,
20531- unsigned size, int zerorest);
20532+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20533+ unsigned long size, int zerorest);
20534
20535-static inline int
20536-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20537+static inline unsigned long
20538+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20539 {
20540 might_fault();
20541+
20542+ if (size > INT_MAX)
20543+ return size;
20544+
20545+#ifdef CONFIG_PAX_MEMORY_UDEREF
20546+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20547+ return size;
20548+#endif
20549+
20550 return __copy_user_nocache(dst, src, size, 1);
20551 }
20552
20553-static inline int
20554+static inline unsigned long
20555 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20556- unsigned size)
20557+ unsigned long size)
20558 {
20559+ if (size > INT_MAX)
20560+ return size;
20561+
20562+#ifdef CONFIG_PAX_MEMORY_UDEREF
20563+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20564+ return size;
20565+#endif
20566+
20567 return __copy_user_nocache(dst, src, size, 0);
20568 }
20569
20570 unsigned long
20571-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20572+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20573
20574 #endif /* _ASM_X86_UACCESS_64_H */
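The 64-bit paths add two guards before any copying: size is capped at INT_MAX, and __compiletime_object_size() is compared against the requested length, with (size_t)-1 meaning "size unknown, fall through to the runtime checks". GCC's __builtin_object_size() is the underlying primitive and behaves the same way in plain C; a hedged sketch (copy_checked() is illustrative):

    #include <stdio.h>
    #include <limits.h>
    #include <stddef.h>

    /* __compiletime_object_size() wraps __builtin_object_size(ptr, 0),
     * which yields (size_t)-1 when the object size cannot be determined. */
    #define compiletime_object_size(p) __builtin_object_size(p, 0)

    static unsigned long copy_checked(void *dst, unsigned long size, size_t sz)
    {
            (void)dst;
            if (size > INT_MAX)             /* refuse absurd lengths outright */
                    return size;
            if (sz != (size_t)-1 && sz < size)
                    return size;            /* destination provably too small */
            return 0;                       /* would copy here */
    }

    int main(void)
    {
            char small[4];
            size_t sz = compiletime_object_size(small); /* typically 4 when optimizing */
            printf("known size: %zu, refused: %lu\n",
                   sz, copy_checked(small, 16, sz));
            return 0;
    }

__builtin_object_size() generally needs optimization enabled to resolve, so build with -O1 or higher to see the concrete size rather than the unknown sentinel.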
20575diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20576index 5b238981..77fdd78 100644
20577--- a/arch/x86/include/asm/word-at-a-time.h
20578+++ b/arch/x86/include/asm/word-at-a-time.h
20579@@ -11,7 +11,7 @@
20580 * and shift, for example.
20581 */
20582 struct word_at_a_time {
20583- const unsigned long one_bits, high_bits;
20584+ unsigned long one_bits, high_bits;
20585 };
20586
20587 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20588diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20589index e45e4da..44e8572 100644
20590--- a/arch/x86/include/asm/x86_init.h
20591+++ b/arch/x86/include/asm/x86_init.h
20592@@ -129,7 +129,7 @@ struct x86_init_ops {
20593 struct x86_init_timers timers;
20594 struct x86_init_iommu iommu;
20595 struct x86_init_pci pci;
20596-};
20597+} __no_const;
20598
20599 /**
20600 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20601@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20602 void (*setup_percpu_clockev)(void);
20603 void (*early_percpu_clock_init)(void);
20604 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20605-};
20606+} __no_const;
20607
20608 struct timespec;
20609
20610@@ -168,7 +168,7 @@ struct x86_platform_ops {
20611 void (*save_sched_clock_state)(void);
20612 void (*restore_sched_clock_state)(void);
20613 void (*apic_post_init)(void);
20614-};
20615+} __no_const;
20616
20617 struct pci_dev;
20618 struct msi_msg;
20619@@ -185,7 +185,7 @@ struct x86_msi_ops {
20620 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20621 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20622 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20623-};
20624+} __no_const;
20625
20626 struct IO_APIC_route_entry;
20627 struct io_apic_irq_attr;
20628@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20629 unsigned int destination, int vector,
20630 struct io_apic_irq_attr *attr);
20631 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20632-};
20633+} __no_const;
20634
20635 extern struct x86_init_ops x86_init;
20636 extern struct x86_cpuinit_ops x86_cpuinit;
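__no_const marks x86_init_ops and the other hook tables above as exempt from constification: these structures really are written during boot, when platform code swaps individual function pointers, so a constify-style compiler plugin (grsecurity ships one; treated as an assumption here) must leave them writable. Where a hook table is genuinely write-once, the plain-C form of the hardening is simply const, as in this sketch:

    #include <stdio.h>

    struct timer_ops {
            void (*setup)(void);
            void (*teardown)(void);
    };

    static void setup_pit(void) { puts("PIT timer up"); }
    static void stop_pit(void)  { puts("PIT timer down"); }

    /* Write-once function-pointer table: const moves it into .rodata,
     * so a kernel-write primitive cannot redirect the hooks. */
    static const struct timer_ops pit_ops = {
            .setup    = setup_pit,
            .teardown = stop_pit,
    };

    int main(void)
    {
            pit_ops.setup();
            /* pit_ops.setup = evil;  -- rejected at compile time */
            pit_ops.teardown();
            return 0;
    }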
20637diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20638index 3e276eb..2eb3c30 100644
20639--- a/arch/x86/include/asm/xen/page.h
20640+++ b/arch/x86/include/asm/xen/page.h
20641@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
20642 extern struct page *m2p_find_override(unsigned long mfn);
20643 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20644
20645-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20646+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20647 {
20648 unsigned long mfn;
20649
20650diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20651index 6c1d741..39e6ecf 100644
20652--- a/arch/x86/include/asm/xsave.h
20653+++ b/arch/x86/include/asm/xsave.h
20654@@ -80,8 +80,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20655 if (unlikely(err))
20656 return -EFAULT;
20657
20658+ pax_open_userland();
20659 __asm__ __volatile__(ASM_STAC "\n"
20660- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20661+ "1:"
20662+ __copyuser_seg
20663+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20664 "2: " ASM_CLAC "\n"
20665 ".section .fixup,\"ax\"\n"
20666 "3: movl $-1,%[err]\n"
20667@@ -91,18 +94,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20668 : [err] "=r" (err)
20669 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20670 : "memory");
20671+ pax_close_userland();
20672 return err;
20673 }
20674
20675 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20676 {
20677 int err;
20678- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20679+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20680 u32 lmask = mask;
20681 u32 hmask = mask >> 32;
20682
20683+ pax_open_userland();
20684 __asm__ __volatile__(ASM_STAC "\n"
20685- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20686+ "1:"
20687+ __copyuser_seg
20688+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20689 "2: " ASM_CLAC "\n"
20690 ".section .fixup,\"ax\"\n"
20691 "3: movl $-1,%[err]\n"
20692@@ -112,6 +119,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20693 : [err] "=r" (err)
20694 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20695 : "memory"); /* memory required? */
20696+ pax_close_userland();
20697 return err;
20698 }
20699
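xsave_user() and xrestore_user() gain pax_open_userland()/pax_close_userland() around their user-memory asm, the UDEREF counterpart of the ASM_STAC/ASM_CLAC pair already present: user space is unreachable by default and opened only for the duration of the access. A toy model of the bracketing discipline; open_userland()/close_userland() only log here and are not the patch's implementation:

    #include <stdio.h>

    /* Hypothetical stand-ins for pax_open_userland()/pax_close_userland();
     * in the patch these toggle the UDEREF protection, here they only log
     * so the bracketing discipline is visible. */
    static void open_userland(void)  { puts("userland opened"); }
    static void close_userland(void) { puts("userland closed"); }

    static int guarded_user_op(unsigned char *ubuf)
    {
            open_userland();
            ubuf[0] ^= 1;           /* all user accesses stay inside the bracket */
            close_userland();
            return 0;
    }

    int main(void)
    {
            unsigned char byte = 0;
            guarded_user_op(&byte);
            printf("byte = %u\n", byte);
            return 0;
    }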
20700diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20701index bbae024..e1528f9 100644
20702--- a/arch/x86/include/uapi/asm/e820.h
20703+++ b/arch/x86/include/uapi/asm/e820.h
20704@@ -63,7 +63,7 @@ struct e820map {
20705 #define ISA_START_ADDRESS 0xa0000
20706 #define ISA_END_ADDRESS 0x100000
20707
20708-#define BIOS_BEGIN 0x000a0000
20709+#define BIOS_BEGIN 0x000c0000
20710 #define BIOS_END 0x00100000
20711
20712 #define BIOS_ROM_BASE 0xffe00000
20713diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20714index 7b0a55a..ad115bf 100644
20715--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20716+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20717@@ -49,7 +49,6 @@
20718 #define EFLAGS 144
20719 #define RSP 152
20720 #define SS 160
20721-#define ARGOFFSET R11
20722 #endif /* __ASSEMBLY__ */
20723
20724 /* top of stack page */
20725diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20726index cb648c8..91cb07e 100644
20727--- a/arch/x86/kernel/Makefile
20728+++ b/arch/x86/kernel/Makefile
20729@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20730 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20731 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20732 obj-y += probe_roms.o
20733-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20734+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20735 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20736 obj-y += syscall_$(BITS).o
20737 obj-$(CONFIG_X86_64) += vsyscall_64.o
20738diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20739index 1dac942..19c8b0c 100644
20740--- a/arch/x86/kernel/acpi/boot.c
20741+++ b/arch/x86/kernel/acpi/boot.c
20742@@ -1312,7 +1312,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20743 * If your system is blacklisted here, but you find that acpi=force
20744 * works for you, please contact linux-acpi@vger.kernel.org
20745 */
20746-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20747+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20748 /*
20749 * Boxes that need ACPI disabled
20750 */
20751@@ -1387,7 +1387,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20752 };
20753
20754 /* second table for DMI checks that should run after early-quirks */
20755-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20756+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20757 /*
20758 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20759 * which includes some code which overrides all temperature
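Converting the DMI tables from __initdata to const ... __initconst is a recurring move in this patch: read-only init data goes into a section that is both mapped read-only and freed after boot. A compilable plain-C model using GCC section attributes; the section names and the *_sim macros are illustrative:

    #include <stdio.h>

    /* Plain-C model of __initdata vs __initconst: both land in init sections
     * the kernel frees after boot, but only the const variant is also
     * mapped read-only. */
    #define initdata_sim  __attribute__((section(".init.data")))
    #define initconst_sim __attribute__((section(".init.rodata")))

    struct dmi_entry { const char *ident; };

    static struct dmi_entry table_rw[] initdata_sim = { { "writable table" }, { 0 } };
    static const struct dmi_entry table_ro[] initconst_sim = { { "read-only table" }, { 0 } };

    int main(void)
    {
            printf("%s / %s\n", table_rw[0].ident, table_ro[0].ident);
            return 0;
    }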
20760diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20761index 3a2ae4c..9db31d6 100644
20762--- a/arch/x86/kernel/acpi/sleep.c
20763+++ b/arch/x86/kernel/acpi/sleep.c
20764@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20765 #else /* CONFIG_64BIT */
20766 #ifdef CONFIG_SMP
20767 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20768+
20769+ pax_open_kernel();
20770 early_gdt_descr.address =
20771 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20772+ pax_close_kernel();
20773+
20774 initial_gs = per_cpu_offset(smp_processor_id());
20775 #endif
20776 initial_code = (unsigned long)wakeup_long64;
20777diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20778index 665c6b7..eae4d56 100644
20779--- a/arch/x86/kernel/acpi/wakeup_32.S
20780+++ b/arch/x86/kernel/acpi/wakeup_32.S
20781@@ -29,13 +29,11 @@ wakeup_pmode_return:
20782 # and restore the stack ... but you need gdt for this to work
20783 movl saved_context_esp, %esp
20784
20785- movl %cs:saved_magic, %eax
20786- cmpl $0x12345678, %eax
20787+ cmpl $0x12345678, saved_magic
20788 jne bogus_magic
20789
20790 # jump to place where we left off
20791- movl saved_eip, %eax
20792- jmp *%eax
20793+ jmp *(saved_eip)
20794
20795 bogus_magic:
20796 jmp bogus_magic
20797diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20798index df94598..f3b29bf 100644
20799--- a/arch/x86/kernel/alternative.c
20800+++ b/arch/x86/kernel/alternative.c
20801@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20802 */
20803 for (a = start; a < end; a++) {
20804 instr = (u8 *)&a->instr_offset + a->instr_offset;
20805+
20806+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20807+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20808+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20809+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20810+#endif
20811+
20812 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20813 BUG_ON(a->replacementlen > a->instrlen);
20814 BUG_ON(a->instrlen > sizeof(insnbuf));
20815@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20816 for (poff = start; poff < end; poff++) {
20817 u8 *ptr = (u8 *)poff + *poff;
20818
20819+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20820+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20821+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20822+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20823+#endif
20824+
20825 if (!*poff || ptr < text || ptr >= text_end)
20826 continue;
20827 /* turn DS segment override prefix into lock prefix */
20828- if (*ptr == 0x3e)
20829+ if (*ktla_ktva(ptr) == 0x3e)
20830 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20831 }
20832 mutex_unlock(&text_mutex);
20833@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20834 for (poff = start; poff < end; poff++) {
20835 u8 *ptr = (u8 *)poff + *poff;
20836
20837+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20838+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20839+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20840+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20841+#endif
20842+
20843 if (!*poff || ptr < text || ptr >= text_end)
20844 continue;
20845 /* turn lock prefix into DS segment override prefix */
20846- if (*ptr == 0xf0)
20847+ if (*ktla_ktva(ptr) == 0xf0)
20848 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20849 }
20850 mutex_unlock(&text_mutex);
20851@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20852
20853 BUG_ON(p->len > MAX_PATCH_LEN);
20854 /* prep the buffer with the original instructions */
20855- memcpy(insnbuf, p->instr, p->len);
20856+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20857 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20858 (unsigned long)p->instr, p->len);
20859
20860@@ -505,7 +524,7 @@ void __init alternative_instructions(void)
20861 if (!uniproc_patched || num_possible_cpus() == 1)
20862 free_init_pages("SMP alternatives",
20863 (unsigned long)__smp_locks,
20864- (unsigned long)__smp_locks_end);
20865+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20866 #endif
20867
20868 apply_paravirt(__parainstructions, __parainstructions_end);
20869@@ -525,13 +544,17 @@ void __init alternative_instructions(void)
20870 * instructions. And on the local CPU you need to be protected again NMI or MCE
20871 * handlers seeing an inconsistent instruction while you patch.
20872 */
20873-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20874+void *__kprobes text_poke_early(void *addr, const void *opcode,
20875 size_t len)
20876 {
20877 unsigned long flags;
20878 local_irq_save(flags);
20879- memcpy(addr, opcode, len);
20880+
20881+ pax_open_kernel();
20882+ memcpy(ktla_ktva(addr), opcode, len);
20883 sync_core();
20884+ pax_close_kernel();
20885+
20886 local_irq_restore(flags);
20887 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20888 that causes hangs on some VIA CPUs. */
20889@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20890 */
20891 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
20892 {
20893- unsigned long flags;
20894- char *vaddr;
20895+ unsigned char *vaddr = ktla_ktva(addr);
20896 struct page *pages[2];
20897- int i;
20898+ size_t i;
20899
20900 if (!core_kernel_text((unsigned long)addr)) {
20901- pages[0] = vmalloc_to_page(addr);
20902- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20903+ pages[0] = vmalloc_to_page(vaddr);
20904+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20905 } else {
20906- pages[0] = virt_to_page(addr);
20907+ pages[0] = virt_to_page(vaddr);
20908 WARN_ON(!PageReserved(pages[0]));
20909- pages[1] = virt_to_page(addr + PAGE_SIZE);
20910+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20911 }
20912 BUG_ON(!pages[0]);
20913- local_irq_save(flags);
20914- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20915- if (pages[1])
20916- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20917- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20918- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20919- clear_fixmap(FIX_TEXT_POKE0);
20920- if (pages[1])
20921- clear_fixmap(FIX_TEXT_POKE1);
20922- local_flush_tlb();
20923- sync_core();
20924- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20925- that causes hangs on some VIA CPUs. */
20926+ text_poke_early(addr, opcode, len);
20927 for (i = 0; i < len; i++)
20928- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20929- local_irq_restore(flags);
20930+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20931 return addr;
20932 }
20933
20934@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs)
20935 if (likely(!bp_patching_in_progress))
20936 return 0;
20937
20938- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20939+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20940 return 0;
20941
20942 /* set up the specified breakpoint handler */
20943@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs)
20944 */
20945 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20946 {
20947- unsigned char int3 = 0xcc;
20948+ const unsigned char int3 = 0xcc;
20949
20950 bp_int3_handler = handler;
20951 bp_int3_addr = (u8 *)addr + sizeof(int3);
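Under 32-bit KERNEXEC the kernel text is visible through two mappings, one executable and one writable, and ktla_ktva() converts an address from the first view into the second; that is why apply_alternatives() rebases instruction pointers, why the patched byte is read through *ktla_ktva(ptr), and why text_poke() can collapse onto text_poke_early() once writes go through the alias under pax_open_kernel(). A Linux-only toy model of such a split mapping (memfd_create() needs glibc 2.27+; everything here is illustration, not the patch's mechanism):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = memfd_create("ktext", 0);
            unsigned char *text, *alias;

            ftruncate(fd, 4096);
            /* "executable" view: read-only here */
            text  = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
            /* writable alias: what ktla_ktva() would hand back */
            alias = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

            alias[0] = 0x90;        /* "patch" one byte through the alias */
            printf("text view now sees: %#x\n", text[0]);   /* 0x90 */

            munmap(alias, 4096);
            munmap(text, 4096);
            close(fd);
            return 0;
    }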
20952diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20953index 7f26c9a..694544e 100644
20954--- a/arch/x86/kernel/apic/apic.c
20955+++ b/arch/x86/kernel/apic/apic.c
20956@@ -198,7 +198,7 @@ int first_system_vector = 0xfe;
20957 /*
20958 * Debug level, exported for io_apic.c
20959 */
20960-unsigned int apic_verbosity;
20961+int apic_verbosity;
20962
20963 int pic_mode;
20964
20965@@ -1992,7 +1992,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20966 apic_write(APIC_ESR, 0);
20967 v = apic_read(APIC_ESR);
20968 ack_APIC_irq();
20969- atomic_inc(&irq_err_count);
20970+ atomic_inc_unchecked(&irq_err_count);
20971
20972 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20973 smp_processor_id(), v);
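irq_err_count here, and irq_mis_count below, move from atomic_t to atomic_unchecked_t: under the PaX REFCOUNT scheme, plain atomic_t traps on signed overflow to defeat reference-count wraps, so statistics counters that may legitimately wrap must opt out via the *_unchecked variants. A plain-C simulation of the distinction (the real mechanism raises a trap from the asm, not a branch):

    #include <stdio.h>
    #include <limits.h>

    /* Simulated REFCOUNT semantics: checked increments refuse to wrap
     * past INT_MAX, unchecked ones are ordinary modular arithmetic. */
    static int atomic_inc_checked(int *v)
    {
            if (*v == INT_MAX) {
                    fprintf(stderr, "refcount overflow detected, leaking object\n");
                    return *v;      /* saturate instead of wrapping */
            }
            return ++*v;
    }

    static int atomic_inc_unchecked_sim(int *v)
    {
            *v = (int)((unsigned)*v + 1);   /* well-defined wraparound */
            return *v;
    }

    int main(void)
    {
            int refcount = INT_MAX, stats = INT_MAX;

            atomic_inc_checked(&refcount);          /* caught */
            atomic_inc_unchecked_sim(&stats);       /* wraps, harmless for stats */
            printf("refcount=%d stats=%d\n", refcount, stats);
            return 0;
    }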
20974diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20975index 2c621a6..fa2b1ae 100644
20976--- a/arch/x86/kernel/apic/apic_flat_64.c
20977+++ b/arch/x86/kernel/apic/apic_flat_64.c
20978@@ -154,7 +154,7 @@ static int flat_probe(void)
20979 return 1;
20980 }
20981
20982-static struct apic apic_flat = {
20983+static struct apic apic_flat __read_only = {
20984 .name = "flat",
20985 .probe = flat_probe,
20986 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20987@@ -268,7 +268,7 @@ static int physflat_probe(void)
20988 return 0;
20989 }
20990
20991-static struct apic apic_physflat = {
20992+static struct apic apic_physflat __read_only = {
20993
20994 .name = "physical flat",
20995 .probe = physflat_probe,
20996diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20997index 191ce75..2db6d63 100644
20998--- a/arch/x86/kernel/apic/apic_noop.c
20999+++ b/arch/x86/kernel/apic/apic_noop.c
21000@@ -118,7 +118,7 @@ static void noop_apic_write(u32 reg, u32 v)
21001 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21002 }
21003
21004-struct apic apic_noop = {
21005+struct apic apic_noop __read_only = {
21006 .name = "noop",
21007 .probe = noop_probe,
21008 .acpi_madt_oem_check = NULL,
21009diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21010index d50e364..543bee3 100644
21011--- a/arch/x86/kernel/apic/bigsmp_32.c
21012+++ b/arch/x86/kernel/apic/bigsmp_32.c
21013@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
21014 return dmi_bigsmp;
21015 }
21016
21017-static struct apic apic_bigsmp = {
21018+static struct apic apic_bigsmp __read_only = {
21019
21020 .name = "bigsmp",
21021 .probe = probe_bigsmp,
21022diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
21023index c552247..587a316 100644
21024--- a/arch/x86/kernel/apic/es7000_32.c
21025+++ b/arch/x86/kernel/apic/es7000_32.c
21026@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
21027 return ret && es7000_apic_is_cluster();
21028 }
21029
21030-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
21031-static struct apic __refdata apic_es7000_cluster = {
21032+static struct apic apic_es7000_cluster __read_only = {
21033
21034 .name = "es7000",
21035 .probe = probe_es7000,
21036@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
21037 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
21038 };
21039
21040-static struct apic __refdata apic_es7000 = {
21041+static struct apic apic_es7000 __read_only = {
21042
21043 .name = "es7000",
21044 .probe = probe_es7000,
21045diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21046index 6ad4658..38a7b5c 100644
21047--- a/arch/x86/kernel/apic/io_apic.c
21048+++ b/arch/x86/kernel/apic/io_apic.c
21049@@ -1057,7 +1057,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
21050 }
21051 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21052
21053-void lock_vector_lock(void)
21054+void lock_vector_lock(void) __acquires(vector_lock)
21055 {
21056 /* Used to the online set of cpus does not change
21057 * during assign_irq_vector.
21058@@ -1065,7 +1065,7 @@ void lock_vector_lock(void)
21059 raw_spin_lock(&vector_lock);
21060 }
21061
21062-void unlock_vector_lock(void)
21063+void unlock_vector_lock(void) __releases(vector_lock)
21064 {
21065 raw_spin_unlock(&vector_lock);
21066 }
21067@@ -2364,7 +2364,7 @@ static void ack_apic_edge(struct irq_data *data)
21068 ack_APIC_irq();
21069 }
21070
21071-atomic_t irq_mis_count;
21072+atomic_unchecked_t irq_mis_count;
21073
21074 #ifdef CONFIG_GENERIC_PENDING_IRQ
21075 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21076@@ -2505,7 +2505,7 @@ static void ack_apic_level(struct irq_data *data)
21077 * at the cpu.
21078 */
21079 if (!(v & (1 << (i & 0x1f)))) {
21080- atomic_inc(&irq_mis_count);
21081+ atomic_inc_unchecked(&irq_mis_count);
21082
21083 eoi_ioapic_irq(irq, cfg);
21084 }
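The __acquires()/__releases() annotations added to lock_vector_lock()/unlock_vector_lock() are mainline sparse context annotations: invisible to the compiler, but "make C=1" checks that every path through the function shifts the lock context as declared. Their kernel definitions are approximately:

    /* From include/linux/compiler.h (approximate): only sparse sees these. */
    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    # define __must_hold(x) __attribute__((context(x, 1, 1)))
    #else
    # define __acquires(x)
    # define __releases(x)
    # define __must_hold(x)
    #endif

    /* Usage mirrors the hunk above: the body must actually take or drop
     * the named lock, or sparse reports a context imbalance. */
    void lock_vector_lock(void) __acquires(vector_lock);
    void unlock_vector_lock(void) __releases(vector_lock);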
21085diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
21086index 1e42e8f..daacf44 100644
21087--- a/arch/x86/kernel/apic/numaq_32.c
21088+++ b/arch/x86/kernel/apic/numaq_32.c
21089@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
21090 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
21091 }
21092
21093-/* Use __refdata to keep false positive warning calm. */
21094-static struct apic __refdata apic_numaq = {
21095+static struct apic apic_numaq __read_only = {
21096
21097 .name = "NUMAQ",
21098 .probe = probe_numaq,
21099diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21100index eb35ef9..f184a21 100644
21101--- a/arch/x86/kernel/apic/probe_32.c
21102+++ b/arch/x86/kernel/apic/probe_32.c
21103@@ -72,7 +72,7 @@ static int probe_default(void)
21104 return 1;
21105 }
21106
21107-static struct apic apic_default = {
21108+static struct apic apic_default __read_only = {
21109
21110 .name = "default",
21111 .probe = probe_default,
21112diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
21113index 00146f9..5e299b8 100644
21114--- a/arch/x86/kernel/apic/summit_32.c
21115+++ b/arch/x86/kernel/apic/summit_32.c
21116@@ -485,7 +485,7 @@ void setup_summit(void)
21117 }
21118 #endif
21119
21120-static struct apic apic_summit = {
21121+static struct apic apic_summit __read_only = {
21122
21123 .name = "summit",
21124 .probe = probe_summit,
21125diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21126index cac85ee..01fa741 100644
21127--- a/arch/x86/kernel/apic/x2apic_cluster.c
21128+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21129@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21130 return notifier_from_errno(err);
21131 }
21132
21133-static struct notifier_block __refdata x2apic_cpu_notifier = {
21134+static struct notifier_block x2apic_cpu_notifier = {
21135 .notifier_call = update_clusterinfo,
21136 };
21137
21138@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21139 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21140 }
21141
21142-static struct apic apic_x2apic_cluster = {
21143+static struct apic apic_x2apic_cluster __read_only = {
21144
21145 .name = "cluster x2apic",
21146 .probe = x2apic_cluster_probe,
21147diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21148index de231e3..1d1b2ee 100644
21149--- a/arch/x86/kernel/apic/x2apic_phys.c
21150+++ b/arch/x86/kernel/apic/x2apic_phys.c
21151@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21152 return apic == &apic_x2apic_phys;
21153 }
21154
21155-static struct apic apic_x2apic_phys = {
21156+static struct apic apic_x2apic_phys __read_only = {
21157
21158 .name = "physical x2apic",
21159 .probe = x2apic_phys_probe,
21160diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21161index d263b13..963258b 100644
21162--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21163+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21164@@ -350,7 +350,7 @@ static int uv_probe(void)
21165 return apic == &apic_x2apic_uv_x;
21166 }
21167
21168-static struct apic __refdata apic_x2apic_uv_x = {
21169+static struct apic apic_x2apic_uv_x __read_only = {
21170
21171 .name = "UV large system",
21172 .probe = uv_probe,
21173diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21174index 3ab0343..814c4787 100644
21175--- a/arch/x86/kernel/apm_32.c
21176+++ b/arch/x86/kernel/apm_32.c
21177@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21178 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21179 * even though they are called in protected mode.
21180 */
21181-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21182+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21183 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21184
21185 static const char driver_version[] = "1.16ac"; /* no spaces */
21186@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21187 BUG_ON(cpu != 0);
21188 gdt = get_cpu_gdt_table(cpu);
21189 save_desc_40 = gdt[0x40 / 8];
21190+
21191+ pax_open_kernel();
21192 gdt[0x40 / 8] = bad_bios_desc;
21193+ pax_close_kernel();
21194
21195 apm_irq_save(flags);
21196 APM_DO_SAVE_SEGS;
21197@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21198 &call->esi);
21199 APM_DO_RESTORE_SEGS;
21200 apm_irq_restore(flags);
21201+
21202+ pax_open_kernel();
21203 gdt[0x40 / 8] = save_desc_40;
21204+ pax_close_kernel();
21205+
21206 put_cpu();
21207
21208 return call->eax & 0xff;
21209@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21210 BUG_ON(cpu != 0);
21211 gdt = get_cpu_gdt_table(cpu);
21212 save_desc_40 = gdt[0x40 / 8];
21213+
21214+ pax_open_kernel();
21215 gdt[0x40 / 8] = bad_bios_desc;
21216+ pax_close_kernel();
21217
21218 apm_irq_save(flags);
21219 APM_DO_SAVE_SEGS;
21220@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21221 &call->eax);
21222 APM_DO_RESTORE_SEGS;
21223 apm_irq_restore(flags);
21224+
21225+ pax_open_kernel();
21226 gdt[0x40 / 8] = save_desc_40;
21227+ pax_close_kernel();
21228+
21229 put_cpu();
21230 return error;
21231 }
21232@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
21233 * code to that CPU.
21234 */
21235 gdt = get_cpu_gdt_table(0);
21236+
21237+ pax_open_kernel();
21238 set_desc_base(&gdt[APM_CS >> 3],
21239 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21240 set_desc_base(&gdt[APM_CS_16 >> 3],
21241 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21242 set_desc_base(&gdt[APM_DS >> 3],
21243 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21244+ pax_close_kernel();
21245
21246 proc_create("apm", 0, NULL, &apm_file_ops);
21247
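Every write to the GDT in apm_32.c is now bracketed by pax_open_kernel()/pax_close_kernel(): with KERNEXEC the GDT sits in read-only memory, and the bracket lifts kernel write protection (conceptually, clearing CR0.WP on x86) for exactly the instructions between the two calls. A user-space analogue that enforces the same discipline with mprotect() on a page; all names are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    /* The "GDT" page is read-only by default and writable only inside the
     * bracket. (The kernel flips CR0.WP instead of page permissions.) */
    static unsigned char *gdt_page;

    static void open_kernel_sim(void)  { mprotect(gdt_page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel_sim(void) { mprotect(gdt_page, 4096, PROT_READ); }

    int main(void)
    {
            gdt_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            memcpy(gdt_page, "descriptor", 11);
            mprotect(gdt_page, 4096, PROT_READ);    /* normal state: read-only */

            open_kernel_sim();
            gdt_page[0] = 'D';                      /* the one sanctioned write */
            close_kernel_sim();

            printf("%s\n", gdt_page);               /* "Descriptor" */
            return 0;
    }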
21248diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21249index 9f6b934..cf5ffb3 100644
21250--- a/arch/x86/kernel/asm-offsets.c
21251+++ b/arch/x86/kernel/asm-offsets.c
21252@@ -32,6 +32,8 @@ void common(void) {
21253 OFFSET(TI_flags, thread_info, flags);
21254 OFFSET(TI_status, thread_info, status);
21255 OFFSET(TI_addr_limit, thread_info, addr_limit);
21256+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21257+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21258
21259 BLANK();
21260 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21261@@ -52,8 +54,26 @@ void common(void) {
21262 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21263 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21264 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21265+
21266+#ifdef CONFIG_PAX_KERNEXEC
21267+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21268 #endif
21269
21270+#ifdef CONFIG_PAX_MEMORY_UDEREF
21271+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21272+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21273+#ifdef CONFIG_X86_64
21274+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21275+#endif
21276+#endif
21277+
21278+#endif
21279+
21280+ BLANK();
21281+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21282+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21283+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21284+
21285 #ifdef CONFIG_XEN
21286 BLANK();
21287 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21288diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21289index e7c798b..2b2019b 100644
21290--- a/arch/x86/kernel/asm-offsets_64.c
21291+++ b/arch/x86/kernel/asm-offsets_64.c
21292@@ -77,6 +77,7 @@ int main(void)
21293 BLANK();
21294 #undef ENTRY
21295
21296+ DEFINE(TSS_size, sizeof(struct tss_struct));
21297 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21298 BLANK();
21299
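asm-offsets.c, including the new TI_lowest_stack and TSS_size entries, is never linked into the kernel: it is compiled to assembly, and marker lines emitted by OFFSET()/DEFINE() are rewritten by a sed script into the generated asm-offsets.h that .S files include. Roughly how the kbuild.h machinery works (the struct and symbol names here are simulated):

    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    struct thread_info_sim { unsigned long flags; void *lowest_stack; };

    /* Never called: only its generated assembly matters. */
    void common_sim(void)
    {
            OFFSET(TI_lowest_stack_sim, thread_info_sim, lowest_stack);
            DEFINE(PAGE_SIZE_sim, 4096);
    }

    int main(void) { return 0; }

Building with "gcc -S offsets.c" and grepping the output for "->" shows the marker lines that the kernel's sed script turns into "#define sym val" entries.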
21300diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21301index 7fd54f0..0691410 100644
21302--- a/arch/x86/kernel/cpu/Makefile
21303+++ b/arch/x86/kernel/cpu/Makefile
21304@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21305 CFLAGS_REMOVE_perf_event.o = -pg
21306 endif
21307
21308-# Make sure load_percpu_segment has no stackprotector
21309-nostackp := $(call cc-option, -fno-stack-protector)
21310-CFLAGS_common.o := $(nostackp)
21311-
21312 obj-y := intel_cacheinfo.o scattered.o topology.o
21313 obj-y += proc.o capflags.o powerflags.o common.o
21314 obj-y += rdrand.o
21315diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21316index c67ffa6..f41fbbf 100644
21317--- a/arch/x86/kernel/cpu/amd.c
21318+++ b/arch/x86/kernel/cpu/amd.c
21319@@ -752,7 +752,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21320 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21321 {
21322 /* AMD errata T13 (order #21922) */
21323- if ((c->x86 == 6)) {
21324+ if (c->x86 == 6) {
21325 /* Duron Rev A0 */
21326 if (c->x86_model == 3 && c->x86_mask == 0)
21327 size = 64;
21328diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21329index 8e28bf2..bf5c0d2 100644
21330--- a/arch/x86/kernel/cpu/common.c
21331+++ b/arch/x86/kernel/cpu/common.c
21332@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
21333
21334 static const struct cpu_dev *this_cpu = &default_cpu;
21335
21336-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21337-#ifdef CONFIG_X86_64
21338- /*
21339- * We need valid kernel segments for data and code in long mode too
21340- * IRET will check the segment types kkeil 2000/10/28
21341- * Also sysret mandates a special GDT layout
21342- *
21343- * TLS descriptors are currently at a different place compared to i386.
21344- * Hopefully nobody expects them at a fixed place (Wine?)
21345- */
21346- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21347- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21348- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21349- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21350- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21351- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21352-#else
21353- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21354- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21355- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21356- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21357- /*
21358- * Segments used for calling PnP BIOS have byte granularity.
21359- * They code segments and data segments have fixed 64k limits,
21360- * the transfer segment sizes are set at run time.
21361- */
21362- /* 32-bit code */
21363- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21364- /* 16-bit code */
21365- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21366- /* 16-bit data */
21367- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21368- /* 16-bit data */
21369- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21370- /* 16-bit data */
21371- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21372- /*
21373- * The APM segments have byte granularity and their bases
21374- * are set at run time. All have 64k limits.
21375- */
21376- /* 32-bit code */
21377- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21378- /* 16-bit code */
21379- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21380- /* data */
21381- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21382-
21383- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21384- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21385- GDT_STACK_CANARY_INIT
21386-#endif
21387-} };
21388-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21389-
21390 static int __init x86_xsave_setup(char *s)
21391 {
21392 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21393@@ -293,6 +239,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21394 }
21395 }
21396
21397+#ifdef CONFIG_X86_64
21398+static __init int setup_disable_pcid(char *arg)
21399+{
21400+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21401+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21402+
21403+#ifdef CONFIG_PAX_MEMORY_UDEREF
21404+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21405+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21406+#endif
21407+
21408+ return 1;
21409+}
21410+__setup("nopcid", setup_disable_pcid);
21411+
21412+static void setup_pcid(struct cpuinfo_x86 *c)
21413+{
21414+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21415+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21416+
21417+#ifdef CONFIG_PAX_MEMORY_UDEREF
21418+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21419+ pax_open_kernel();
21420+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21421+ pax_close_kernel();
21422+ printk("PAX: slow and weak UDEREF enabled\n");
21423+ } else
21424+ printk("PAX: UDEREF disabled\n");
21425+#endif
21426+
21427+ return;
21428+ }
21429+
21430+ printk("PAX: PCID detected\n");
21431+ set_in_cr4(X86_CR4_PCIDE);
21432+
21433+#ifdef CONFIG_PAX_MEMORY_UDEREF
21434+ pax_open_kernel();
21435+ clone_pgd_mask = ~(pgdval_t)0UL;
21436+ pax_close_kernel();
21437+ if (pax_user_shadow_base)
21438+ printk("PAX: weak UDEREF enabled\n");
21439+ else {
21440+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21441+ printk("PAX: strong UDEREF enabled\n");
21442+ }
21443+#endif
21444+
21445+ if (cpu_has(c, X86_FEATURE_INVPCID))
21446+ printk("PAX: INVPCID detected\n");
21447+}
21448+#endif
21449+
21450 /*
21451 * Some CPU features depend on higher CPUID levels, which may not always
21452 * be available due to CPUID level capping or broken virtualization
21453@@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu)
21454 {
21455 struct desc_ptr gdt_descr;
21456
21457- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21458+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21459 gdt_descr.size = GDT_SIZE - 1;
21460 load_gdt(&gdt_descr);
21461 /* Reload the per-cpu base */
21462@@ -883,6 +882,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21463 setup_smep(c);
21464 setup_smap(c);
21465
21466+#ifdef CONFIG_X86_64
21467+ setup_pcid(c);
21468+#endif
21469+
21470 /*
21471 * The vendor-specific functions might have changed features.
21472 * Now we do "generic changes."
21473@@ -891,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21474 /* Filter out anything that depends on CPUID levels we don't have */
21475 filter_cpuid_features(c, true);
21476
21477+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21478+ setup_clear_cpu_cap(X86_FEATURE_SEP);
21479+#endif
21480+
21481 /* If the model name is still unset, do table lookup. */
21482 if (!c->x86_model_id[0]) {
21483 const char *p;
21484@@ -1078,10 +1085,12 @@ static __init int setup_disablecpuid(char *arg)
21485 }
21486 __setup("clearcpuid=", setup_disablecpuid);
21487
21488+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21489+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21490+
21491 #ifdef CONFIG_X86_64
21492-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21493-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21494- (unsigned long) debug_idt_table };
21495+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21496+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21497
21498 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21499 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21500@@ -1095,7 +1104,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
21501 EXPORT_PER_CPU_SYMBOL(current_task);
21502
21503 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21504- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21505+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21506 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21507
21508 DEFINE_PER_CPU(char *, irq_stack_ptr) =
21509@@ -1245,7 +1254,7 @@ void cpu_init(void)
21510 load_ucode_ap();
21511
21512 cpu = stack_smp_processor_id();
21513- t = &per_cpu(init_tss, cpu);
21514+ t = init_tss + cpu;
21515 oist = &per_cpu(orig_ist, cpu);
21516
21517 #ifdef CONFIG_NUMA
21518@@ -1280,7 +1289,6 @@ void cpu_init(void)
21519 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21520 barrier();
21521
21522- x86_configure_nx();
21523 enable_x2apic();
21524
21525 /*
21526@@ -1332,7 +1340,7 @@ void cpu_init(void)
21527 {
21528 int cpu = smp_processor_id();
21529 struct task_struct *curr = current;
21530- struct tss_struct *t = &per_cpu(init_tss, cpu);
21531+ struct tss_struct *t = init_tss + cpu;
21532 struct thread_struct *thread = &curr->thread;
21533
21534 show_ucode_info_early();
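The "nopcid" handling added to common.c follows the stock early-parameter pattern: a parser registered with __setup() clears X86_FEATURE_PCID/INVPCID before identify_cpu() runs, and setup_pcid() then chooses per CPU between strong PCID-backed UDEREF and the slow shadow-base fallback. A user-space simulation of that decision flow; the feature bits and names are invented for the example:

    #include <stdio.h>
    #include <string.h>

    enum { FEAT_PCID = 1 << 0, FEAT_INVPCID = 1 << 1 };
    static unsigned cpu_caps = FEAT_PCID | FEAT_INVPCID;

    /* stands in for the __setup("nopcid", ...) handler */
    static void parse_boot_arg(const char *arg)
    {
            if (strcmp(arg, "nopcid") == 0)
                    cpu_caps &= ~(FEAT_PCID | FEAT_INVPCID);
    }

    static void setup_pcid_sim(void)
    {
            if (cpu_caps & FEAT_PCID)
                    puts("PAX: strong UDEREF (PCID) available");
            else
                    puts("PAX: falling back to slow and weak UDEREF");
    }

    int main(int argc, char **argv)
    {
            if (argc > 1)
                    parse_boot_arg(argv[1]);
            setup_pcid_sim();
            return 0;
    }

Running it as "./a.out nopcid" exercises the fallback branch.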
21535diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21536index 0641113..06f5ba4 100644
21537--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21538+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21539@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
21540 };
21541
21542 #ifdef CONFIG_AMD_NB
21543+static struct attribute *default_attrs_amd_nb[] = {
21544+ &type.attr,
21545+ &level.attr,
21546+ &coherency_line_size.attr,
21547+ &physical_line_partition.attr,
21548+ &ways_of_associativity.attr,
21549+ &number_of_sets.attr,
21550+ &size.attr,
21551+ &shared_cpu_map.attr,
21552+ &shared_cpu_list.attr,
21553+ NULL,
21554+ NULL,
21555+ NULL,
21556+ NULL
21557+};
21558+
21559 static struct attribute **amd_l3_attrs(void)
21560 {
21561 static struct attribute **attrs;
21562@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
21563
21564 n = ARRAY_SIZE(default_attrs);
21565
21566- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21567- n += 2;
21568-
21569- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21570- n += 1;
21571-
21572- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21573- if (attrs == NULL)
21574- return attrs = default_attrs;
21575-
21576- for (n = 0; default_attrs[n]; n++)
21577- attrs[n] = default_attrs[n];
21578+ attrs = default_attrs_amd_nb;
21579
21580 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21581 attrs[n++] = &cache_disable_0.attr;
21582@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
21583 .default_attrs = default_attrs,
21584 };
21585
21586+#ifdef CONFIG_AMD_NB
21587+static struct kobj_type ktype_cache_amd_nb = {
21588+ .sysfs_ops = &sysfs_ops,
21589+ .default_attrs = default_attrs_amd_nb,
21590+};
21591+#endif
21592+
21593 static struct kobj_type ktype_percpu_entry = {
21594 .sysfs_ops = &sysfs_ops,
21595 };
21596@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
21597 return retval;
21598 }
21599
21600+#ifdef CONFIG_AMD_NB
21601+ amd_l3_attrs();
21602+#endif
21603+
21604 for (i = 0; i < num_cache_leaves; i++) {
21605+ struct kobj_type *ktype;
21606+
21607 this_object = INDEX_KOBJECT_PTR(cpu, i);
21608 this_object->cpu = cpu;
21609 this_object->index = i;
21610
21611 this_leaf = CPUID4_INFO_IDX(cpu, i);
21612
21613- ktype_cache.default_attrs = default_attrs;
21614+ ktype = &ktype_cache;
21615 #ifdef CONFIG_AMD_NB
21616 if (this_leaf->base.nb)
21617- ktype_cache.default_attrs = amd_l3_attrs();
21618+ ktype = &ktype_cache_amd_nb;
21619 #endif
21620 retval = kobject_init_and_add(&(this_object->kobj),
21621- &ktype_cache,
21622+ ktype,
21623 per_cpu(ici_cache_kobject, cpu),
21624 "index%1lu", i);
21625 if (unlikely(retval)) {
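The intel_cacheinfo.c hunks replace a kzalloc()'d, runtime-assembled attribute array with the static default_attrs_amd_nb, which carries spare NULL slots that amd_l3_attrs() fills once for the optional AMD L3 entries, plus a dedicated kobj_type so the generic table is never mutated. The pattern in miniature, with strings standing in for struct attribute pointers:

    #include <stdio.h>
    #include <stddef.h>

    /* Static array with spare NULL slots appended once, instead of
     * kzalloc+copy at runtime. */
    static const char *base_attrs[] = {
            "type", "level", "size",
            NULL, NULL, NULL, NULL, /* three spare slots plus terminator */
    };

    static void append_optional(int have_index_disable, int have_partitioning)
    {
            size_t n;
            for (n = 0; base_attrs[n]; n++) /* find current terminator */
                    ;
            if (have_index_disable) {
                    base_attrs[n++] = "cache_disable_0";
                    base_attrs[n++] = "cache_disable_1";
            }
            if (have_partitioning)
                    base_attrs[n++] = "subcaches";
    }

    int main(void)
    {
            append_optional(1, 0);
            for (size_t i = 0; base_attrs[i]; i++)
                    printf("%s\n", base_attrs[i]);
            return 0;
    }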
21626diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21627index 4d5419b..95f11bb 100644
21628--- a/arch/x86/kernel/cpu/mcheck/mce.c
21629+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21630@@ -45,6 +45,7 @@
21631 #include <asm/processor.h>
21632 #include <asm/mce.h>
21633 #include <asm/msr.h>
21634+#include <asm/local.h>
21635
21636 #include "mce-internal.h"
21637
21638@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21639 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21640 m->cs, m->ip);
21641
21642- if (m->cs == __KERNEL_CS)
21643+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21644 print_symbol("{%s}", m->ip);
21645 pr_cont("\n");
21646 }
21647@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21648
21649 #define PANIC_TIMEOUT 5 /* 5 seconds */
21650
21651-static atomic_t mce_paniced;
21652+static atomic_unchecked_t mce_paniced;
21653
21654 static int fake_panic;
21655-static atomic_t mce_fake_paniced;
21656+static atomic_unchecked_t mce_fake_paniced;
21657
21658 /* Panic in progress. Enable interrupts and wait for final IPI */
21659 static void wait_for_panic(void)
21660@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21661 /*
21662 * Make sure only one CPU runs in machine check panic
21663 */
21664- if (atomic_inc_return(&mce_paniced) > 1)
21665+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21666 wait_for_panic();
21667 barrier();
21668
21669@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21670 console_verbose();
21671 } else {
21672 /* Don't log too much for fake panic */
21673- if (atomic_inc_return(&mce_fake_paniced) > 1)
21674+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21675 return;
21676 }
21677 /* First print corrected ones that are still unlogged */
21678@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21679 if (!fake_panic) {
21680 if (panic_timeout == 0)
21681 panic_timeout = mca_cfg.panic_timeout;
21682- panic(msg);
21683+ panic("%s", msg);
21684 } else
21685 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21686 }
21687@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
21688 * might have been modified by someone else.
21689 */
21690 rmb();
21691- if (atomic_read(&mce_paniced))
21692+ if (atomic_read_unchecked(&mce_paniced))
21693 wait_for_panic();
21694 if (!mca_cfg.monarch_timeout)
21695 goto out;
21696@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21697 }
21698
21699 /* Call the installed machine check handler for this CPU setup. */
21700-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21701+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21702 unexpected_machine_check;
21703
21704 /*
21705@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21706 return;
21707 }
21708
21709+ pax_open_kernel();
21710 machine_check_vector = do_machine_check;
21711+ pax_close_kernel();
21712
21713 __mcheck_cpu_init_generic();
21714 __mcheck_cpu_init_vendor(c);
21715@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21716 */
21717
21718 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21719-static int mce_chrdev_open_count; /* #times opened */
21720+static local_t mce_chrdev_open_count; /* #times opened */
21721 static int mce_chrdev_open_exclu; /* already open exclusive? */
21722
21723 static int mce_chrdev_open(struct inode *inode, struct file *file)
21724@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21725 spin_lock(&mce_chrdev_state_lock);
21726
21727 if (mce_chrdev_open_exclu ||
21728- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21729+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21730 spin_unlock(&mce_chrdev_state_lock);
21731
21732 return -EBUSY;
21733@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21734
21735 if (file->f_flags & O_EXCL)
21736 mce_chrdev_open_exclu = 1;
21737- mce_chrdev_open_count++;
21738+ local_inc(&mce_chrdev_open_count);
21739
21740 spin_unlock(&mce_chrdev_state_lock);
21741
21742@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21743 {
21744 spin_lock(&mce_chrdev_state_lock);
21745
21746- mce_chrdev_open_count--;
21747+ local_dec(&mce_chrdev_open_count);
21748 mce_chrdev_open_exclu = 0;
21749
21750 spin_unlock(&mce_chrdev_state_lock);
21751@@ -2406,7 +2409,7 @@ static __init void mce_init_banks(void)
21752
21753 for (i = 0; i < mca_cfg.banks; i++) {
21754 struct mce_bank *b = &mce_banks[i];
21755- struct device_attribute *a = &b->attr;
21756+ device_attribute_no_const *a = &b->attr;
21757
21758 sysfs_attr_init(&a->attr);
21759 a->attr.name = b->attrname;
21760@@ -2474,7 +2477,7 @@ struct dentry *mce_get_debugfs_dir(void)
21761 static void mce_reset(void)
21762 {
21763 cpu_missing = 0;
21764- atomic_set(&mce_fake_paniced, 0);
21765+ atomic_set_unchecked(&mce_fake_paniced, 0);
21766 atomic_set(&mce_executing, 0);
21767 atomic_set(&mce_callin, 0);
21768 atomic_set(&global_nwo, 0);
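
Among the mce.c changes, panic(msg) becomes panic("%s", msg): text that reaches a printf-style function in format-string position has any '%' directives it carries interpreted against varargs that were never passed. A standalone sketch of the hazard and the fix, with hypothetical helper names:

#include <stdio.h>

/* log_unsafe() treats its argument as a format string, so a '%'
 * inside msg makes printf read arguments that do not exist;
 * log_safe() mirrors the panic("%s", msg) fix and prints msg
 * literally. Both helpers are hypothetical. */
static void log_unsafe(const char *msg) { printf(msg); }
static void log_safe(const char *msg)   { printf("%s", msg); }

int main(void)
{
    const char *msg = "disk 100%s full\n";  /* stray directive */

    log_unsafe("benign text\n");  /* only works because there is no '%' */
    log_safe(msg);                /* safe regardless of msg's contents */
    return 0;
}
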
21769diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21770index a304298..49b6d06 100644
21771--- a/arch/x86/kernel/cpu/mcheck/p5.c
21772+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21773@@ -10,6 +10,7 @@
21774 #include <asm/processor.h>
21775 #include <asm/mce.h>
21776 #include <asm/msr.h>
21777+#include <asm/pgtable.h>
21778
21779 /* By default disabled */
21780 int mce_p5_enabled __read_mostly;
21781@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21782 if (!cpu_has(c, X86_FEATURE_MCE))
21783 return;
21784
21785+ pax_open_kernel();
21786 machine_check_vector = pentium_machine_check;
21787+ pax_close_kernel();
21788 /* Make sure the vector pointer is visible before we enable MCEs: */
21789 wmb();
21790
21791diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21792index 7dc5564..1273569 100644
21793--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21794+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21795@@ -9,6 +9,7 @@
21796 #include <asm/processor.h>
21797 #include <asm/mce.h>
21798 #include <asm/msr.h>
21799+#include <asm/pgtable.h>
21800
21801 /* Machine check handler for WinChip C6: */
21802 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21803@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21804 {
21805 u32 lo, hi;
21806
21807+ pax_open_kernel();
21808 machine_check_vector = winchip_machine_check;
21809+ pax_close_kernel();
21810 /* Make sure the vector pointer is visible before we enable MCEs: */
21811 wmb();
21812
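
p5.c and winchip.c get the same treatment as mce.c above: with machine_check_vector made __read_only, the one store to it is bracketed by pax_open_kernel()/pax_close_kernel(). A rough userspace analogue of that write-rarely idea using mprotect() on a page-aligned object; this sketches the concept only, since the kernel mechanism works on CR0.WP rather than page protections:

#include <stdio.h>
#include <sys/mman.h>

typedef void (*handler_t)(void);

static void default_handler(void) { puts("default"); }
static void real_handler(void)    { puts("real"); }

/* Page-aligned so mprotect() covers the page holding it; note that
 * mprotect works at page granularity, so anything else sharing the
 * page would change protection too. */
static handler_t vector __attribute__((aligned(4096))) = default_handler;

static void set_vector(handler_t h)
{
    mprotect(&vector, sizeof(vector), PROT_READ | PROT_WRITE); /* "open" */
    vector = h;
    mprotect(&vector, sizeof(vector), PROT_READ);              /* "close" */
}

int main(void)
{
    set_vector(real_handler);
    vector();   /* prints "real"; a plain store to vector now faults */
    return 0;
}
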
21813diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21814index 15c9876..0a43909 100644
21815--- a/arch/x86/kernel/cpu/microcode/core.c
21816+++ b/arch/x86/kernel/cpu/microcode/core.c
21817@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21818 return NOTIFY_OK;
21819 }
21820
21821-static struct notifier_block __refdata mc_cpu_notifier = {
21822+static struct notifier_block mc_cpu_notifier = {
21823 .notifier_call = mc_cpu_callback,
21824 };
21825
21826diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21827index a276fa7..e66810f 100644
21828--- a/arch/x86/kernel/cpu/microcode/intel.c
21829+++ b/arch/x86/kernel/cpu/microcode/intel.c
21830@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21831
21832 static int get_ucode_user(void *to, const void *from, size_t n)
21833 {
21834- return copy_from_user(to, from, n);
21835+ return copy_from_user(to, (const void __force_user *)from, n);
21836 }
21837
21838 static enum ucode_state
21839 request_microcode_user(int cpu, const void __user *buf, size_t size)
21840 {
21841- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21842+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21843 }
21844
21845 static void microcode_fini_cpu(int cpu)
21846diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21847index f961de9..8a9d332 100644
21848--- a/arch/x86/kernel/cpu/mtrr/main.c
21849+++ b/arch/x86/kernel/cpu/mtrr/main.c
21850@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21851 u64 size_or_mask, size_and_mask;
21852 static bool mtrr_aps_delayed_init;
21853
21854-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21855+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21856
21857 const struct mtrr_ops *mtrr_if;
21858
21859diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21860index df5e41f..816c719 100644
21861--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21862+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21863@@ -25,7 +25,7 @@ struct mtrr_ops {
21864 int (*validate_add_page)(unsigned long base, unsigned long size,
21865 unsigned int type);
21866 int (*have_wrcomb)(void);
21867-};
21868+} __do_const;
21869
21870 extern int generic_get_free_region(unsigned long base, unsigned long size,
21871 int replace_reg);
21872diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21873index 79f9f84..38ace52 100644
21874--- a/arch/x86/kernel/cpu/perf_event.c
21875+++ b/arch/x86/kernel/cpu/perf_event.c
21876@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
21877 pr_info("no hardware sampling interrupt available.\n");
21878 }
21879
21880-static struct attribute_group x86_pmu_format_group = {
21881+static attribute_group_no_const x86_pmu_format_group = {
21882 .name = "format",
21883 .attrs = NULL,
21884 };
21885@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = {
21886 NULL,
21887 };
21888
21889-static struct attribute_group x86_pmu_events_group = {
21890+static attribute_group_no_const x86_pmu_events_group = {
21891 .name = "events",
21892 .attrs = events_attr,
21893 };
21894@@ -1971,7 +1971,7 @@ static unsigned long get_segment_base(unsigned int segment)
21895 if (idx > GDT_ENTRIES)
21896 return 0;
21897
21898- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21899+ desc = get_cpu_gdt_table(smp_processor_id());
21900 }
21901
21902 return get_desc_base(desc + idx);
21903@@ -2061,7 +2061,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21904 break;
21905
21906 perf_callchain_store(entry, frame.return_address);
21907- fp = frame.next_frame;
21908+ fp = (const void __force_user *)frame.next_frame;
21909 }
21910 }
21911
21912diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21913index 639d128..e92d7e5 100644
21914--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21915+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21916@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21917 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21918 {
21919 struct attribute **attrs;
21920- struct attribute_group *attr_group;
21921+ attribute_group_no_const *attr_group;
21922 int i = 0, j;
21923
21924 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21925diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21926index aa333d9..f9db700 100644
21927--- a/arch/x86/kernel/cpu/perf_event_intel.c
21928+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21929@@ -2309,10 +2309,10 @@ __init int intel_pmu_init(void)
21930 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21931
21932 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21933- u64 capabilities;
21934+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21935
21936- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21937- x86_pmu.intel_cap.capabilities = capabilities;
21938+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21939+ x86_pmu.intel_cap.capabilities = capabilities;
21940 }
21941
21942 intel_ds_init();
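
The intel_pmu_init() hunk above pre-loads capabilities with the value set by earlier init, reads the MSR directly into its destination with rdmsrl_safe() (which returns non-zero on a faulting read, where plain rdmsrl() would oops on hardware lacking the MSR), and restores the saved value on failure. The shape of that fallback, with a hypothetical read_reg_safe() standing in for the MSR access:

#include <stdio.h>

/* Hypothetical stand-in for rdmsrl_safe(): returns non-zero when the
 * read faults, in which case *val must not be trusted. */
static int read_reg_safe(unsigned reg, unsigned long long *val)
{
    (void)reg;
    (void)val;
    return 1;                    /* simulate a CPU without the register */
}

int main(void)
{
    unsigned long long caps = 0;          /* value set by earlier init */
    unsigned long long saved = caps;      /* keep a restorable copy */

    if (read_reg_safe(0x345, &caps))      /* 0x345: illustrative reg id */
        caps = saved;                     /* faulted: restore default */

    printf("caps = %llu\n", caps);
    return 0;
}
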
21943diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21944index 5ad35ad..e0a3960 100644
21945--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21946+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21947@@ -425,7 +425,7 @@ static struct attribute *rapl_events_cln_attr[] = {
21948 NULL,
21949 };
21950
21951-static struct attribute_group rapl_pmu_events_group = {
21952+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21953 .name = "events",
21954 .attrs = NULL, /* patched at runtime */
21955 };
21956diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21957index 047f540..afdeba0 100644
21958--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21959+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21960@@ -3326,7 +3326,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21961 static int __init uncore_type_init(struct intel_uncore_type *type)
21962 {
21963 struct intel_uncore_pmu *pmus;
21964- struct attribute_group *attr_group;
21965+ attribute_group_no_const *attr_group;
21966 struct attribute **attrs;
21967 int i, j;
21968
21969diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21970index a80ab71..4089da5 100644
21971--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21972+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21973@@ -498,7 +498,7 @@ struct intel_uncore_box {
21974 struct uncore_event_desc {
21975 struct kobj_attribute attr;
21976 const char *config;
21977-};
21978+} __do_const;
21979
21980 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
21981 { \
21982diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21983index 7d9481c..99c7e4b 100644
21984--- a/arch/x86/kernel/cpuid.c
21985+++ b/arch/x86/kernel/cpuid.c
21986@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21987 return notifier_from_errno(err);
21988 }
21989
21990-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21991+static struct notifier_block cpuid_class_cpu_notifier =
21992 {
21993 .notifier_call = cpuid_class_cpu_callback,
21994 };
21995diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21996index a57902e..ebaae2a 100644
21997--- a/arch/x86/kernel/crash.c
21998+++ b/arch/x86/kernel/crash.c
21999@@ -57,10 +57,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22000 {
22001 #ifdef CONFIG_X86_32
22002 struct pt_regs fixed_regs;
22003-#endif
22004
22005-#ifdef CONFIG_X86_32
22006- if (!user_mode_vm(regs)) {
22007+ if (!user_mode(regs)) {
22008 crash_fixup_ss_esp(&fixed_regs, regs);
22009 regs = &fixed_regs;
22010 }
22011diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22012index afa64ad..dce67dd 100644
22013--- a/arch/x86/kernel/crash_dump_64.c
22014+++ b/arch/x86/kernel/crash_dump_64.c
22015@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22016 return -ENOMEM;
22017
22018 if (userbuf) {
22019- if (copy_to_user(buf, vaddr + offset, csize)) {
22020+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22021 iounmap(vaddr);
22022 return -EFAULT;
22023 }
22024diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22025index f6dfd93..892ade4 100644
22026--- a/arch/x86/kernel/doublefault.c
22027+++ b/arch/x86/kernel/doublefault.c
22028@@ -12,7 +12,7 @@
22029
22030 #define DOUBLEFAULT_STACKSIZE (1024)
22031 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22032-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22033+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22034
22035 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22036
22037@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22038 unsigned long gdt, tss;
22039
22040 native_store_gdt(&gdt_desc);
22041- gdt = gdt_desc.address;
22042+ gdt = (unsigned long)gdt_desc.address;
22043
22044 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22045
22046@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22047 /* 0x2 bit is always set */
22048 .flags = X86_EFLAGS_SF | 0x2,
22049 .sp = STACK_START,
22050- .es = __USER_DS,
22051+ .es = __KERNEL_DS,
22052 .cs = __KERNEL_CS,
22053 .ss = __KERNEL_DS,
22054- .ds = __USER_DS,
22055+ .ds = __KERNEL_DS,
22056 .fs = __KERNEL_PERCPU,
22057
22058 .__cr3 = __pa_nodebug(swapper_pg_dir),
22059diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22060index d9c12d3..7858b62 100644
22061--- a/arch/x86/kernel/dumpstack.c
22062+++ b/arch/x86/kernel/dumpstack.c
22063@@ -2,6 +2,9 @@
22064 * Copyright (C) 1991, 1992 Linus Torvalds
22065 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22066 */
22067+#ifdef CONFIG_GRKERNSEC_HIDESYM
22068+#define __INCLUDED_BY_HIDESYM 1
22069+#endif
22070 #include <linux/kallsyms.h>
22071 #include <linux/kprobes.h>
22072 #include <linux/uaccess.h>
22073@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
22074 static void
22075 print_ftrace_graph_addr(unsigned long addr, void *data,
22076 const struct stacktrace_ops *ops,
22077- struct thread_info *tinfo, int *graph)
22078+ struct task_struct *task, int *graph)
22079 {
22080- struct task_struct *task;
22081 unsigned long ret_addr;
22082 int index;
22083
22084 if (addr != (unsigned long)return_to_handler)
22085 return;
22086
22087- task = tinfo->task;
22088 index = task->curr_ret_stack;
22089
22090 if (!task->ret_stack || index < *graph)
22091@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22092 static inline void
22093 print_ftrace_graph_addr(unsigned long addr, void *data,
22094 const struct stacktrace_ops *ops,
22095- struct thread_info *tinfo, int *graph)
22096+ struct task_struct *task, int *graph)
22097 { }
22098 #endif
22099
22100@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22101 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22102 */
22103
22104-static inline int valid_stack_ptr(struct thread_info *tinfo,
22105- void *p, unsigned int size, void *end)
22106+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22107 {
22108- void *t = tinfo;
22109 if (end) {
22110 if (p < end && p >= (end-THREAD_SIZE))
22111 return 1;
22112@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22113 }
22114
22115 unsigned long
22116-print_context_stack(struct thread_info *tinfo,
22117+print_context_stack(struct task_struct *task, void *stack_start,
22118 unsigned long *stack, unsigned long bp,
22119 const struct stacktrace_ops *ops, void *data,
22120 unsigned long *end, int *graph)
22121 {
22122 struct stack_frame *frame = (struct stack_frame *)bp;
22123
22124- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22125+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22126 unsigned long addr;
22127
22128 addr = *stack;
22129@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22130 } else {
22131 ops->address(data, addr, 0);
22132 }
22133- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22134+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22135 }
22136 stack++;
22137 }
22138@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22139 EXPORT_SYMBOL_GPL(print_context_stack);
22140
22141 unsigned long
22142-print_context_stack_bp(struct thread_info *tinfo,
22143+print_context_stack_bp(struct task_struct *task, void *stack_start,
22144 unsigned long *stack, unsigned long bp,
22145 const struct stacktrace_ops *ops, void *data,
22146 unsigned long *end, int *graph)
22147@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22148 struct stack_frame *frame = (struct stack_frame *)bp;
22149 unsigned long *ret_addr = &frame->return_address;
22150
22151- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22152+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22153 unsigned long addr = *ret_addr;
22154
22155 if (!__kernel_text_address(addr))
22156@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22157 ops->address(data, addr, 1);
22158 frame = frame->next_frame;
22159 ret_addr = &frame->return_address;
22160- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22161+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22162 }
22163
22164 return (unsigned long)frame;
22165@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22166 static void print_trace_address(void *data, unsigned long addr, int reliable)
22167 {
22168 touch_nmi_watchdog();
22169- printk(data);
22170+ printk("%s", (char *)data);
22171 printk_stack_address(addr, reliable);
22172 }
22173
22174@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
22175 }
22176 EXPORT_SYMBOL_GPL(oops_begin);
22177
22178+extern void gr_handle_kernel_exploit(void);
22179+
22180 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22181 {
22182 if (regs && kexec_should_crash(current))
22183@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22184 panic("Fatal exception in interrupt");
22185 if (panic_on_oops)
22186 panic("Fatal exception");
22187- do_exit(signr);
22188+
22189+ gr_handle_kernel_exploit();
22190+
22191+ do_group_exit(signr);
22192 }
22193
22194 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
22195@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
22196 print_modules();
22197 show_regs(regs);
22198 #ifdef CONFIG_X86_32
22199- if (user_mode_vm(regs)) {
22200+ if (user_mode(regs)) {
22201 sp = regs->sp;
22202 ss = regs->ss & 0xffff;
22203 } else {
22204@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22205 unsigned long flags = oops_begin();
22206 int sig = SIGSEGV;
22207
22208- if (!user_mode_vm(regs))
22209+ if (!user_mode(regs))
22210 report_bug(regs->ip, regs);
22211
22212 if (__die(str, regs, err))
22213diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22214index f2a1770..10fa52d 100644
22215--- a/arch/x86/kernel/dumpstack_32.c
22216+++ b/arch/x86/kernel/dumpstack_32.c
22217@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22218 bp = stack_frame(task, regs);
22219
22220 for (;;) {
22221- struct thread_info *context;
22222+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22223
22224- context = (struct thread_info *)
22225- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
22226- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
22227+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22228
22229- stack = (unsigned long *)context->previous_esp;
22230- if (!stack)
22231+ if (stack_start == task_stack_page(task))
22232 break;
22233+ stack = *(unsigned long **)stack_start;
22234 if (ops->stack(data, "IRQ") < 0)
22235 break;
22236 touch_nmi_watchdog();
22237@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
22238 int i;
22239
22240 show_regs_print_info(KERN_EMERG);
22241- __show_regs(regs, !user_mode_vm(regs));
22242+ __show_regs(regs, !user_mode(regs));
22243
22244 /*
22245 * When in-kernel, we also print out the stack and code at the
22246 * time of the fault..
22247 */
22248- if (!user_mode_vm(regs)) {
22249+ if (!user_mode(regs)) {
22250 unsigned int code_prologue = code_bytes * 43 / 64;
22251 unsigned int code_len = code_bytes;
22252 unsigned char c;
22253 u8 *ip;
22254+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22255
22256 pr_emerg("Stack:\n");
22257 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22258
22259 pr_emerg("Code:");
22260
22261- ip = (u8 *)regs->ip - code_prologue;
22262+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22263 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22264 /* try starting at IP */
22265- ip = (u8 *)regs->ip;
22266+ ip = (u8 *)regs->ip + cs_base;
22267 code_len = code_len - code_prologue + 1;
22268 }
22269 for (i = 0; i < code_len; i++, ip++) {
22270@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
22271 pr_cont(" Bad EIP value.");
22272 break;
22273 }
22274- if (ip == (u8 *)regs->ip)
22275+ if (ip == (u8 *)regs->ip + cs_base)
22276 pr_cont(" <%02x>", c);
22277 else
22278 pr_cont(" %02x", c);
22279@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
22280 {
22281 unsigned short ud2;
22282
22283+ ip = ktla_ktva(ip);
22284 if (ip < PAGE_OFFSET)
22285 return 0;
22286 if (probe_kernel_address((unsigned short *)ip, ud2))
22287@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
22288
22289 return ud2 == 0x0b0f;
22290 }
22291+
22292+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22293+void pax_check_alloca(unsigned long size)
22294+{
22295+ unsigned long sp = (unsigned long)&sp, stack_left;
22296+
22297+ /* all kernel stacks are of the same size */
22298+ stack_left = sp & (THREAD_SIZE - 1);
22299+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22300+}
22301+EXPORT_SYMBOL(pax_check_alloca);
22302+#endif
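
pax_check_alloca() above turns the current stack pointer into a remaining-byte count with a single mask: because every 32-bit kernel stack is THREAD_SIZE bytes, THREAD_SIZE-aligned, and grows downward, sp & (THREAD_SIZE - 1) is exactly the free space below sp. A self-checking sketch of that arithmetic (THREAD_SIZE and the addresses are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* assumed stack size, power of two */

int main(void)
{
    uintptr_t base = 0x100000;           /* aligned stack bottom (made up) */
    uintptr_t sp   = base + 0x1c40;      /* sp somewhere in [base, base+8K) */
    uintptr_t left = sp & (THREAD_SIZE - 1);

    assert(left == sp - base);           /* bytes free before underflow */
    printf("stack_left = %lu bytes\n", (unsigned long)left);
    return 0;
}
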
22303diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22304index addb207..921706b 100644
22305--- a/arch/x86/kernel/dumpstack_64.c
22306+++ b/arch/x86/kernel/dumpstack_64.c
22307@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22308 unsigned long *irq_stack_end =
22309 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22310 unsigned used = 0;
22311- struct thread_info *tinfo;
22312 int graph = 0;
22313 unsigned long dummy;
22314+ void *stack_start;
22315
22316 if (!task)
22317 task = current;
22318@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22319 * current stack address. If the stacks consist of nested
22320 * exceptions
22321 */
22322- tinfo = task_thread_info(task);
22323 for (;;) {
22324 char *id;
22325 unsigned long *estack_end;
22326+
22327 estack_end = in_exception_stack(cpu, (unsigned long)stack,
22328 &used, &id);
22329
22330@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22331 if (ops->stack(data, id) < 0)
22332 break;
22333
22334- bp = ops->walk_stack(tinfo, stack, bp, ops,
22335+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
22336 data, estack_end, &graph);
22337 ops->stack(data, "<EOE>");
22338 /*
22339@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22340 * second-to-last pointer (index -2 to end) in the
22341 * exception stack:
22342 */
22343+ if ((u16)estack_end[-1] != __KERNEL_DS)
22344+ goto out;
22345 stack = (unsigned long *) estack_end[-2];
22346 continue;
22347 }
22348@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22349 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
22350 if (ops->stack(data, "IRQ") < 0)
22351 break;
22352- bp = ops->walk_stack(tinfo, stack, bp,
22353+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22354 ops, data, irq_stack_end, &graph);
22355 /*
22356 * We link to the next stack (which would be
22357@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22358 /*
22359 * This handles the process stack:
22360 */
22361- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22362+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22363+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22364+out:
22365 put_cpu();
22366 }
22367 EXPORT_SYMBOL(dump_trace);
22368@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
22369
22370 return ud2 == 0x0b0f;
22371 }
22372+
22373+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22374+void pax_check_alloca(unsigned long size)
22375+{
22376+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22377+ unsigned cpu, used;
22378+ char *id;
22379+
22380+ /* check the process stack first */
22381+ stack_start = (unsigned long)task_stack_page(current);
22382+ stack_end = stack_start + THREAD_SIZE;
22383+ if (likely(stack_start <= sp && sp < stack_end)) {
22384+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22385+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22386+ return;
22387+ }
22388+
22389+ cpu = get_cpu();
22390+
22391+ /* check the irq stacks */
22392+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22393+ stack_start = stack_end - IRQ_STACK_SIZE;
22394+ if (stack_start <= sp && sp < stack_end) {
22395+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22396+ put_cpu();
22397+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22398+ return;
22399+ }
22400+
22401+ /* check the exception stacks */
22402+ used = 0;
22403+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22404+ stack_start = stack_end - EXCEPTION_STKSZ;
22405+ if (stack_end && stack_start <= sp && sp < stack_end) {
22406+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22407+ put_cpu();
22408+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22409+ return;
22410+ }
22411+
22412+ put_cpu();
22413+
22414+ /* unknown stack */
22415+ BUG();
22416+}
22417+EXPORT_SYMBOL(pax_check_alloca);
22418+#endif
22419diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22420index 988c00a..4f673b6 100644
22421--- a/arch/x86/kernel/e820.c
22422+++ b/arch/x86/kernel/e820.c
22423@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22424
22425 static void early_panic(char *msg)
22426 {
22427- early_printk(msg);
22428- panic(msg);
22429+ early_printk("%s", msg);
22430+ panic("%s", msg);
22431 }
22432
22433 static int userdef __initdata;
22434diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22435index 01d1c18..8073693 100644
22436--- a/arch/x86/kernel/early_printk.c
22437+++ b/arch/x86/kernel/early_printk.c
22438@@ -7,6 +7,7 @@
22439 #include <linux/pci_regs.h>
22440 #include <linux/pci_ids.h>
22441 #include <linux/errno.h>
22442+#include <linux/sched.h>
22443 #include <asm/io.h>
22444 #include <asm/processor.h>
22445 #include <asm/fcntl.h>
22446diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22447index a2a4f46..6cab058 100644
22448--- a/arch/x86/kernel/entry_32.S
22449+++ b/arch/x86/kernel/entry_32.S
22450@@ -177,13 +177,153 @@
22451 /*CFI_REL_OFFSET gs, PT_GS*/
22452 .endm
22453 .macro SET_KERNEL_GS reg
22454+
22455+#ifdef CONFIG_CC_STACKPROTECTOR
22456 movl $(__KERNEL_STACK_CANARY), \reg
22457+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22458+ movl $(__USER_DS), \reg
22459+#else
22460+ xorl \reg, \reg
22461+#endif
22462+
22463 movl \reg, %gs
22464 .endm
22465
22466 #endif /* CONFIG_X86_32_LAZY_GS */
22467
22468-.macro SAVE_ALL
22469+.macro pax_enter_kernel
22470+#ifdef CONFIG_PAX_KERNEXEC
22471+ call pax_enter_kernel
22472+#endif
22473+.endm
22474+
22475+.macro pax_exit_kernel
22476+#ifdef CONFIG_PAX_KERNEXEC
22477+ call pax_exit_kernel
22478+#endif
22479+.endm
22480+
22481+#ifdef CONFIG_PAX_KERNEXEC
22482+ENTRY(pax_enter_kernel)
22483+#ifdef CONFIG_PARAVIRT
22484+ pushl %eax
22485+ pushl %ecx
22486+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22487+ mov %eax, %esi
22488+#else
22489+ mov %cr0, %esi
22490+#endif
22491+ bts $16, %esi
22492+ jnc 1f
22493+ mov %cs, %esi
22494+ cmp $__KERNEL_CS, %esi
22495+ jz 3f
22496+ ljmp $__KERNEL_CS, $3f
22497+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22498+2:
22499+#ifdef CONFIG_PARAVIRT
22500+ mov %esi, %eax
22501+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22502+#else
22503+ mov %esi, %cr0
22504+#endif
22505+3:
22506+#ifdef CONFIG_PARAVIRT
22507+ popl %ecx
22508+ popl %eax
22509+#endif
22510+ ret
22511+ENDPROC(pax_enter_kernel)
22512+
22513+ENTRY(pax_exit_kernel)
22514+#ifdef CONFIG_PARAVIRT
22515+ pushl %eax
22516+ pushl %ecx
22517+#endif
22518+ mov %cs, %esi
22519+ cmp $__KERNEXEC_KERNEL_CS, %esi
22520+ jnz 2f
22521+#ifdef CONFIG_PARAVIRT
22522+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22523+ mov %eax, %esi
22524+#else
22525+ mov %cr0, %esi
22526+#endif
22527+ btr $16, %esi
22528+ ljmp $__KERNEL_CS, $1f
22529+1:
22530+#ifdef CONFIG_PARAVIRT
22531+ mov %esi, %eax
22532+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22533+#else
22534+ mov %esi, %cr0
22535+#endif
22536+2:
22537+#ifdef CONFIG_PARAVIRT
22538+ popl %ecx
22539+ popl %eax
22540+#endif
22541+ ret
22542+ENDPROC(pax_exit_kernel)
22543+#endif
22544+
22545+ .macro pax_erase_kstack
22546+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22547+ call pax_erase_kstack
22548+#endif
22549+ .endm
22550+
22551+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22552+/*
22553+ * ebp: thread_info
22554+ */
22555+ENTRY(pax_erase_kstack)
22556+ pushl %edi
22557+ pushl %ecx
22558+ pushl %eax
22559+
22560+ mov TI_lowest_stack(%ebp), %edi
22561+ mov $-0xBEEF, %eax
22562+ std
22563+
22564+1: mov %edi, %ecx
22565+ and $THREAD_SIZE_asm - 1, %ecx
22566+ shr $2, %ecx
22567+ repne scasl
22568+ jecxz 2f
22569+
22570+ cmp $2*16, %ecx
22571+ jc 2f
22572+
22573+ mov $2*16, %ecx
22574+ repe scasl
22575+ jecxz 2f
22576+ jne 1b
22577+
22578+2: cld
22579+ mov %esp, %ecx
22580+ sub %edi, %ecx
22581+
22582+ cmp $THREAD_SIZE_asm, %ecx
22583+ jb 3f
22584+ ud2
22585+3:
22586+
22587+ shr $2, %ecx
22588+ rep stosl
22589+
22590+ mov TI_task_thread_sp0(%ebp), %edi
22591+ sub $128, %edi
22592+ mov %edi, TI_lowest_stack(%ebp)
22593+
22594+ popl %eax
22595+ popl %ecx
22596+ popl %edi
22597+ ret
22598+ENDPROC(pax_erase_kstack)
22599+#endif
22600+
22601+.macro __SAVE_ALL _DS
22602 cld
22603 PUSH_GS
22604 pushl_cfi %fs
22605@@ -206,7 +346,7 @@
22606 CFI_REL_OFFSET ecx, 0
22607 pushl_cfi %ebx
22608 CFI_REL_OFFSET ebx, 0
22609- movl $(__USER_DS), %edx
22610+ movl $\_DS, %edx
22611 movl %edx, %ds
22612 movl %edx, %es
22613 movl $(__KERNEL_PERCPU), %edx
22614@@ -214,6 +354,15 @@
22615 SET_KERNEL_GS %edx
22616 .endm
22617
22618+.macro SAVE_ALL
22619+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22620+ __SAVE_ALL __KERNEL_DS
22621+ pax_enter_kernel
22622+#else
22623+ __SAVE_ALL __USER_DS
22624+#endif
22625+.endm
22626+
22627 .macro RESTORE_INT_REGS
22628 popl_cfi %ebx
22629 CFI_RESTORE ebx
22630@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22631 popfl_cfi
22632 jmp syscall_exit
22633 CFI_ENDPROC
22634-END(ret_from_fork)
22635+ENDPROC(ret_from_fork)
22636
22637 ENTRY(ret_from_kernel_thread)
22638 CFI_STARTPROC
22639@@ -344,7 +493,15 @@ ret_from_intr:
22640 andl $SEGMENT_RPL_MASK, %eax
22641 #endif
22642 cmpl $USER_RPL, %eax
22643+
22644+#ifdef CONFIG_PAX_KERNEXEC
22645+ jae resume_userspace
22646+
22647+ pax_exit_kernel
22648+ jmp resume_kernel
22649+#else
22650 jb resume_kernel # not returning to v8086 or userspace
22651+#endif
22652
22653 ENTRY(resume_userspace)
22654 LOCKDEP_SYS_EXIT
22655@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
22656 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22657 # int/exception return?
22658 jne work_pending
22659- jmp restore_all
22660-END(ret_from_exception)
22661+ jmp restore_all_pax
22662+ENDPROC(ret_from_exception)
22663
22664 #ifdef CONFIG_PREEMPT
22665 ENTRY(resume_kernel)
22666@@ -369,7 +526,7 @@ need_resched:
22667 jz restore_all
22668 call preempt_schedule_irq
22669 jmp need_resched
22670-END(resume_kernel)
22671+ENDPROC(resume_kernel)
22672 #endif
22673 CFI_ENDPROC
22674 /*
22675@@ -403,30 +560,45 @@ sysenter_past_esp:
22676 /*CFI_REL_OFFSET cs, 0*/
22677 /*
22678 * Push current_thread_info()->sysenter_return to the stack.
22679- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22680- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22681 */
22682- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22683+ pushl_cfi $0
22684 CFI_REL_OFFSET eip, 0
22685
22686 pushl_cfi %eax
22687 SAVE_ALL
22688+ GET_THREAD_INFO(%ebp)
22689+ movl TI_sysenter_return(%ebp),%ebp
22690+ movl %ebp,PT_EIP(%esp)
22691 ENABLE_INTERRUPTS(CLBR_NONE)
22692
22693 /*
22694 * Load the potential sixth argument from user stack.
22695 * Careful about security.
22696 */
22697+ movl PT_OLDESP(%esp),%ebp
22698+
22699+#ifdef CONFIG_PAX_MEMORY_UDEREF
22700+ mov PT_OLDSS(%esp),%ds
22701+1: movl %ds:(%ebp),%ebp
22702+ push %ss
22703+ pop %ds
22704+#else
22705 cmpl $__PAGE_OFFSET-3,%ebp
22706 jae syscall_fault
22707 ASM_STAC
22708 1: movl (%ebp),%ebp
22709 ASM_CLAC
22710+#endif
22711+
22712 movl %ebp,PT_EBP(%esp)
22713 _ASM_EXTABLE(1b,syscall_fault)
22714
22715 GET_THREAD_INFO(%ebp)
22716
22717+#ifdef CONFIG_PAX_RANDKSTACK
22718+ pax_erase_kstack
22719+#endif
22720+
22721 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22722 jnz sysenter_audit
22723 sysenter_do_call:
22724@@ -441,12 +613,24 @@ sysenter_do_call:
22725 testl $_TIF_ALLWORK_MASK, %ecx
22726 jne sysexit_audit
22727 sysenter_exit:
22728+
22729+#ifdef CONFIG_PAX_RANDKSTACK
22730+ pushl_cfi %eax
22731+ movl %esp, %eax
22732+ call pax_randomize_kstack
22733+ popl_cfi %eax
22734+#endif
22735+
22736+ pax_erase_kstack
22737+
22738 /* if something modifies registers it must also disable sysexit */
22739 movl PT_EIP(%esp), %edx
22740 movl PT_OLDESP(%esp), %ecx
22741 xorl %ebp,%ebp
22742 TRACE_IRQS_ON
22743 1: mov PT_FS(%esp), %fs
22744+2: mov PT_DS(%esp), %ds
22745+3: mov PT_ES(%esp), %es
22746 PTGS_TO_GS
22747 ENABLE_INTERRUPTS_SYSEXIT
22748
22749@@ -463,6 +647,9 @@ sysenter_audit:
22750 movl %eax,%edx /* 2nd arg: syscall number */
22751 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22752 call __audit_syscall_entry
22753+
22754+ pax_erase_kstack
22755+
22756 pushl_cfi %ebx
22757 movl PT_EAX(%esp),%eax /* reload syscall number */
22758 jmp sysenter_do_call
22759@@ -488,10 +675,16 @@ sysexit_audit:
22760
22761 CFI_ENDPROC
22762 .pushsection .fixup,"ax"
22763-2: movl $0,PT_FS(%esp)
22764+4: movl $0,PT_FS(%esp)
22765+ jmp 1b
22766+5: movl $0,PT_DS(%esp)
22767+ jmp 1b
22768+6: movl $0,PT_ES(%esp)
22769 jmp 1b
22770 .popsection
22771- _ASM_EXTABLE(1b,2b)
22772+ _ASM_EXTABLE(1b,4b)
22773+ _ASM_EXTABLE(2b,5b)
22774+ _ASM_EXTABLE(3b,6b)
22775 PTGS_TO_GS_EX
22776 ENDPROC(ia32_sysenter_target)
22777
22778@@ -506,6 +699,11 @@ ENTRY(system_call)
22779 pushl_cfi %eax # save orig_eax
22780 SAVE_ALL
22781 GET_THREAD_INFO(%ebp)
22782+
22783+#ifdef CONFIG_PAX_RANDKSTACK
22784+ pax_erase_kstack
22785+#endif
22786+
22787 # system call tracing in operation / emulation
22788 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22789 jnz syscall_trace_entry
22790@@ -524,6 +722,15 @@ syscall_exit:
22791 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22792 jne syscall_exit_work
22793
22794+restore_all_pax:
22795+
22796+#ifdef CONFIG_PAX_RANDKSTACK
22797+ movl %esp, %eax
22798+ call pax_randomize_kstack
22799+#endif
22800+
22801+ pax_erase_kstack
22802+
22803 restore_all:
22804 TRACE_IRQS_IRET
22805 restore_all_notrace:
22806@@ -580,14 +787,34 @@ ldt_ss:
22807 * compensating for the offset by changing to the ESPFIX segment with
22808 * a base address that matches for the difference.
22809 */
22810-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22811+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22812 mov %esp, %edx /* load kernel esp */
22813 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22814 mov %dx, %ax /* eax: new kernel esp */
22815 sub %eax, %edx /* offset (low word is 0) */
22816+#ifdef CONFIG_SMP
22817+ movl PER_CPU_VAR(cpu_number), %ebx
22818+ shll $PAGE_SHIFT_asm, %ebx
22819+ addl $cpu_gdt_table, %ebx
22820+#else
22821+ movl $cpu_gdt_table, %ebx
22822+#endif
22823 shr $16, %edx
22824- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22825- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22826+
22827+#ifdef CONFIG_PAX_KERNEXEC
22828+ mov %cr0, %esi
22829+ btr $16, %esi
22830+ mov %esi, %cr0
22831+#endif
22832+
22833+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22834+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22835+
22836+#ifdef CONFIG_PAX_KERNEXEC
22837+ bts $16, %esi
22838+ mov %esi, %cr0
22839+#endif
22840+
22841 pushl_cfi $__ESPFIX_SS
22842 pushl_cfi %eax /* new kernel esp */
22843 /* Disable interrupts, but do not irqtrace this section: we
22844@@ -616,20 +843,18 @@ work_resched:
22845 movl TI_flags(%ebp), %ecx
22846 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22847 # than syscall tracing?
22848- jz restore_all
22849+ jz restore_all_pax
22850 testb $_TIF_NEED_RESCHED, %cl
22851 jnz work_resched
22852
22853 work_notifysig: # deal with pending signals and
22854 # notify-resume requests
22855+ movl %esp, %eax
22856 #ifdef CONFIG_VM86
22857 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22858- movl %esp, %eax
22859 jne work_notifysig_v86 # returning to kernel-space or
22860 # vm86-space
22861 1:
22862-#else
22863- movl %esp, %eax
22864 #endif
22865 TRACE_IRQS_ON
22866 ENABLE_INTERRUPTS(CLBR_NONE)
22867@@ -650,7 +875,7 @@ work_notifysig_v86:
22868 movl %eax, %esp
22869 jmp 1b
22870 #endif
22871-END(work_pending)
22872+ENDPROC(work_pending)
22873
22874 # perform syscall exit tracing
22875 ALIGN
22876@@ -658,11 +883,14 @@ syscall_trace_entry:
22877 movl $-ENOSYS,PT_EAX(%esp)
22878 movl %esp, %eax
22879 call syscall_trace_enter
22880+
22881+ pax_erase_kstack
22882+
22883 /* What it returned is what we'll actually use. */
22884 cmpl $(NR_syscalls), %eax
22885 jnae syscall_call
22886 jmp syscall_exit
22887-END(syscall_trace_entry)
22888+ENDPROC(syscall_trace_entry)
22889
22890 # perform syscall exit tracing
22891 ALIGN
22892@@ -675,21 +903,25 @@ syscall_exit_work:
22893 movl %esp, %eax
22894 call syscall_trace_leave
22895 jmp resume_userspace
22896-END(syscall_exit_work)
22897+ENDPROC(syscall_exit_work)
22898 CFI_ENDPROC
22899
22900 RING0_INT_FRAME # can't unwind into user space anyway
22901 syscall_fault:
22902+#ifdef CONFIG_PAX_MEMORY_UDEREF
22903+ push %ss
22904+ pop %ds
22905+#endif
22906 ASM_CLAC
22907 GET_THREAD_INFO(%ebp)
22908 movl $-EFAULT,PT_EAX(%esp)
22909 jmp resume_userspace
22910-END(syscall_fault)
22911+ENDPROC(syscall_fault)
22912
22913 syscall_badsys:
22914 movl $-ENOSYS,PT_EAX(%esp)
22915 jmp resume_userspace
22916-END(syscall_badsys)
22917+ENDPROC(syscall_badsys)
22918 CFI_ENDPROC
22919 /*
22920 * End of kprobes section
22921@@ -705,8 +937,15 @@ END(syscall_badsys)
22922 * normal stack and adjusts ESP with the matching offset.
22923 */
22924 /* fixup the stack */
22925- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22926- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22927+#ifdef CONFIG_SMP
22928+ movl PER_CPU_VAR(cpu_number), %ebx
22929+ shll $PAGE_SHIFT_asm, %ebx
22930+ addl $cpu_gdt_table, %ebx
22931+#else
22932+ movl $cpu_gdt_table, %ebx
22933+#endif
22934+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22935+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22936 shl $16, %eax
22937 addl %esp, %eax /* the adjusted stack pointer */
22938 pushl_cfi $__KERNEL_DS
22939@@ -759,7 +998,7 @@ vector=vector+1
22940 .endr
22941 2: jmp common_interrupt
22942 .endr
22943-END(irq_entries_start)
22944+ENDPROC(irq_entries_start)
22945
22946 .previous
22947 END(interrupt)
22948@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
22949 pushl_cfi $do_coprocessor_error
22950 jmp error_code
22951 CFI_ENDPROC
22952-END(coprocessor_error)
22953+ENDPROC(coprocessor_error)
22954
22955 ENTRY(simd_coprocessor_error)
22956 RING0_INT_FRAME
22957@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
22958 .section .altinstructions,"a"
22959 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22960 .previous
22961-.section .altinstr_replacement,"ax"
22962+.section .altinstr_replacement,"a"
22963 663: pushl $do_simd_coprocessor_error
22964 664:
22965 .previous
22966@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
22967 #endif
22968 jmp error_code
22969 CFI_ENDPROC
22970-END(simd_coprocessor_error)
22971+ENDPROC(simd_coprocessor_error)
22972
22973 ENTRY(device_not_available)
22974 RING0_INT_FRAME
22975@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
22976 pushl_cfi $do_device_not_available
22977 jmp error_code
22978 CFI_ENDPROC
22979-END(device_not_available)
22980+ENDPROC(device_not_available)
22981
22982 #ifdef CONFIG_PARAVIRT
22983 ENTRY(native_iret)
22984 iret
22985 _ASM_EXTABLE(native_iret, iret_exc)
22986-END(native_iret)
22987+ENDPROC(native_iret)
22988
22989 ENTRY(native_irq_enable_sysexit)
22990 sti
22991 sysexit
22992-END(native_irq_enable_sysexit)
22993+ENDPROC(native_irq_enable_sysexit)
22994 #endif
22995
22996 ENTRY(overflow)
22997@@ -872,7 +1111,7 @@ ENTRY(overflow)
22998 pushl_cfi $do_overflow
22999 jmp error_code
23000 CFI_ENDPROC
23001-END(overflow)
23002+ENDPROC(overflow)
23003
23004 ENTRY(bounds)
23005 RING0_INT_FRAME
23006@@ -881,7 +1120,7 @@ ENTRY(bounds)
23007 pushl_cfi $do_bounds
23008 jmp error_code
23009 CFI_ENDPROC
23010-END(bounds)
23011+ENDPROC(bounds)
23012
23013 ENTRY(invalid_op)
23014 RING0_INT_FRAME
23015@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
23016 pushl_cfi $do_invalid_op
23017 jmp error_code
23018 CFI_ENDPROC
23019-END(invalid_op)
23020+ENDPROC(invalid_op)
23021
23022 ENTRY(coprocessor_segment_overrun)
23023 RING0_INT_FRAME
23024@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
23025 pushl_cfi $do_coprocessor_segment_overrun
23026 jmp error_code
23027 CFI_ENDPROC
23028-END(coprocessor_segment_overrun)
23029+ENDPROC(coprocessor_segment_overrun)
23030
23031 ENTRY(invalid_TSS)
23032 RING0_EC_FRAME
23033@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
23034 pushl_cfi $do_invalid_TSS
23035 jmp error_code
23036 CFI_ENDPROC
23037-END(invalid_TSS)
23038+ENDPROC(invalid_TSS)
23039
23040 ENTRY(segment_not_present)
23041 RING0_EC_FRAME
23042@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
23043 pushl_cfi $do_segment_not_present
23044 jmp error_code
23045 CFI_ENDPROC
23046-END(segment_not_present)
23047+ENDPROC(segment_not_present)
23048
23049 ENTRY(stack_segment)
23050 RING0_EC_FRAME
23051@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
23052 pushl_cfi $do_stack_segment
23053 jmp error_code
23054 CFI_ENDPROC
23055-END(stack_segment)
23056+ENDPROC(stack_segment)
23057
23058 ENTRY(alignment_check)
23059 RING0_EC_FRAME
23060@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
23061 pushl_cfi $do_alignment_check
23062 jmp error_code
23063 CFI_ENDPROC
23064-END(alignment_check)
23065+ENDPROC(alignment_check)
23066
23067 ENTRY(divide_error)
23068 RING0_INT_FRAME
23069@@ -940,7 +1179,7 @@ ENTRY(divide_error)
23070 pushl_cfi $do_divide_error
23071 jmp error_code
23072 CFI_ENDPROC
23073-END(divide_error)
23074+ENDPROC(divide_error)
23075
23076 #ifdef CONFIG_X86_MCE
23077 ENTRY(machine_check)
23078@@ -950,7 +1189,7 @@ ENTRY(machine_check)
23079 pushl_cfi machine_check_vector
23080 jmp error_code
23081 CFI_ENDPROC
23082-END(machine_check)
23083+ENDPROC(machine_check)
23084 #endif
23085
23086 ENTRY(spurious_interrupt_bug)
23087@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
23088 pushl_cfi $do_spurious_interrupt_bug
23089 jmp error_code
23090 CFI_ENDPROC
23091-END(spurious_interrupt_bug)
23092+ENDPROC(spurious_interrupt_bug)
23093 /*
23094 * End of kprobes section
23095 */
23096@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23097
23098 ENTRY(mcount)
23099 ret
23100-END(mcount)
23101+ENDPROC(mcount)
23102
23103 ENTRY(ftrace_caller)
23104 cmpl $0, function_trace_stop
23105@@ -1103,7 +1342,7 @@ ftrace_graph_call:
23106 .globl ftrace_stub
23107 ftrace_stub:
23108 ret
23109-END(ftrace_caller)
23110+ENDPROC(ftrace_caller)
23111
23112 ENTRY(ftrace_regs_caller)
23113 pushf /* push flags before compare (in cs location) */
23114@@ -1207,7 +1446,7 @@ trace:
23115 popl %ecx
23116 popl %eax
23117 jmp ftrace_stub
23118-END(mcount)
23119+ENDPROC(mcount)
23120 #endif /* CONFIG_DYNAMIC_FTRACE */
23121 #endif /* CONFIG_FUNCTION_TRACER */
23122
23123@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
23124 popl %ecx
23125 popl %eax
23126 ret
23127-END(ftrace_graph_caller)
23128+ENDPROC(ftrace_graph_caller)
23129
23130 .globl return_to_handler
23131 return_to_handler:
23132@@ -1291,15 +1530,18 @@ error_code:
23133 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23134 REG_TO_PTGS %ecx
23135 SET_KERNEL_GS %ecx
23136- movl $(__USER_DS), %ecx
23137+ movl $(__KERNEL_DS), %ecx
23138 movl %ecx, %ds
23139 movl %ecx, %es
23140+
23141+ pax_enter_kernel
23142+
23143 TRACE_IRQS_OFF
23144 movl %esp,%eax # pt_regs pointer
23145 call *%edi
23146 jmp ret_from_exception
23147 CFI_ENDPROC
23148-END(page_fault)
23149+ENDPROC(page_fault)
23150
23151 /*
23152 * Debug traps and NMI can happen at the one SYSENTER instruction
23153@@ -1342,7 +1584,7 @@ debug_stack_correct:
23154 call do_debug
23155 jmp ret_from_exception
23156 CFI_ENDPROC
23157-END(debug)
23158+ENDPROC(debug)
23159
23160 /*
23161 * NMI is doubly nasty. It can happen _while_ we're handling
23162@@ -1380,6 +1622,9 @@ nmi_stack_correct:
23163 xorl %edx,%edx # zero error code
23164 movl %esp,%eax # pt_regs pointer
23165 call do_nmi
23166+
23167+ pax_exit_kernel
23168+
23169 jmp restore_all_notrace
23170 CFI_ENDPROC
23171
23172@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
23173 FIXUP_ESPFIX_STACK # %eax == %esp
23174 xorl %edx,%edx # zero error code
23175 call do_nmi
23176+
23177+ pax_exit_kernel
23178+
23179 RESTORE_REGS
23180 lss 12+4(%esp), %esp # back to espfix stack
23181 CFI_ADJUST_CFA_OFFSET -24
23182 jmp irq_return
23183 CFI_ENDPROC
23184-END(nmi)
23185+ENDPROC(nmi)
23186
23187 ENTRY(int3)
23188 RING0_INT_FRAME
23189@@ -1434,14 +1682,14 @@ ENTRY(int3)
23190 call do_int3
23191 jmp ret_from_exception
23192 CFI_ENDPROC
23193-END(int3)
23194+ENDPROC(int3)
23195
23196 ENTRY(general_protection)
23197 RING0_EC_FRAME
23198 pushl_cfi $do_general_protection
23199 jmp error_code
23200 CFI_ENDPROC
23201-END(general_protection)
23202+ENDPROC(general_protection)
23203
23204 #ifdef CONFIG_KVM_GUEST
23205 ENTRY(async_page_fault)
23206@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
23207 pushl_cfi $do_async_page_fault
23208 jmp error_code
23209 CFI_ENDPROC
23210-END(async_page_fault)
23211+ENDPROC(async_page_fault)
23212 #endif
23213
23214 /*
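
pax_erase_kstack above is the 32-bit assembly form of the STACKLEAK wipe: starting from the lowest stack address the task has used, it scans toward the stack base (std plus repne/repe scasl) for a run of poison words marking space that was already cleared, refills everything from there up to the live frame (rep stosl), and then resets the TI_lowest_stack mark near the top of the stack. A simplified C rendering of the scan-then-fill step, omitting the final re-mark; the poison value, run length, and names are illustrative:

#include <stddef.h>
#include <stdio.h>

#define POISON 0xffff4111u   /* 32-bit pattern of the -0xBEEF fill */
#define RUN    16            /* poison run treated as "already cleared";
                                the asm checks a run of 2*16 dwords */

/* Simplified model: stack[0] is the lowest word of the stack area,
 * the stack grows toward index 0, depth is the lowest index the task
 * ever reached, sp the current stack pointer's index (sp >= depth). */
static void erase_kstack(unsigned *stack, size_t depth, size_t sp)
{
    size_t i = depth, run = 0;

    while (i > 0 && run < RUN)           /* scan below the low-water mark */
        run = (stack[--i] == POISON) ? run + 1 : 0;

    while (i < sp)                       /* re-poison the dead range */
        stack[i++] = POISON;
}

int main(void)
{
    unsigned stack[64];

    for (size_t i = 0; i < 64; i++)      /* fresh stack: all poison */
        stack[i] = POISON;
    for (size_t i = 8; i < 40; i++)      /* a deep call dirties it */
        stack[i] = 0xdeadbeef;

    erase_kstack(stack, 8, 40);          /* depth = 8, current sp = 40 */
    printf("stack[20] = %#x\n", stack[20]);  /* poisoned again */
    return 0;
}
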
23215diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23216index 1e96c36..3ff710a 100644
23217--- a/arch/x86/kernel/entry_64.S
23218+++ b/arch/x86/kernel/entry_64.S
23219@@ -59,6 +59,8 @@
23220 #include <asm/context_tracking.h>
23221 #include <asm/smap.h>
23222 #include <linux/err.h>
23223+#include <asm/pgtable.h>
23224+#include <asm/alternative-asm.h>
23225
23226 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23227 #include <linux/elf-em.h>
23228@@ -80,8 +82,9 @@
23229 #ifdef CONFIG_DYNAMIC_FTRACE
23230
23231 ENTRY(function_hook)
23232+ pax_force_retaddr
23233 retq
23234-END(function_hook)
23235+ENDPROC(function_hook)
23236
23237 /* skip is set if stack has been adjusted */
23238 .macro ftrace_caller_setup skip=0
23239@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
23240 #endif
23241
23242 GLOBAL(ftrace_stub)
23243+ pax_force_retaddr
23244 retq
23245-END(ftrace_caller)
23246+ENDPROC(ftrace_caller)
23247
23248 ENTRY(ftrace_regs_caller)
23249 /* Save the current flags before compare (in SS location)*/
23250@@ -191,7 +195,7 @@ ftrace_restore_flags:
23251 popfq
23252 jmp ftrace_stub
23253
23254-END(ftrace_regs_caller)
23255+ENDPROC(ftrace_regs_caller)
23256
23257
23258 #else /* ! CONFIG_DYNAMIC_FTRACE */
23259@@ -212,6 +216,7 @@ ENTRY(function_hook)
23260 #endif
23261
23262 GLOBAL(ftrace_stub)
23263+ pax_force_retaddr
23264 retq
23265
23266 trace:
23267@@ -225,12 +230,13 @@ trace:
23268 #endif
23269 subq $MCOUNT_INSN_SIZE, %rdi
23270
23271+ pax_force_fptr ftrace_trace_function
23272 call *ftrace_trace_function
23273
23274 MCOUNT_RESTORE_FRAME
23275
23276 jmp ftrace_stub
23277-END(function_hook)
23278+ENDPROC(function_hook)
23279 #endif /* CONFIG_DYNAMIC_FTRACE */
23280 #endif /* CONFIG_FUNCTION_TRACER */
23281
23282@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
23283
23284 MCOUNT_RESTORE_FRAME
23285
23286+ pax_force_retaddr
23287 retq
23288-END(ftrace_graph_caller)
23289+ENDPROC(ftrace_graph_caller)
23290
23291 GLOBAL(return_to_handler)
23292 subq $24, %rsp
23293@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
23294 movq 8(%rsp), %rdx
23295 movq (%rsp), %rax
23296 addq $24, %rsp
23297+ pax_force_fptr %rdi
23298 jmp *%rdi
23299+ENDPROC(return_to_handler)
23300 #endif
23301
23302
23303@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
23304 ENDPROC(native_usergs_sysret64)
23305 #endif /* CONFIG_PARAVIRT */
23306
23307+ .macro ljmpq sel, off
23308+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23309+ .byte 0x48; ljmp *1234f(%rip)
23310+ .pushsection .rodata
23311+ .align 16
23312+ 1234: .quad \off; .word \sel
23313+ .popsection
23314+#else
23315+ pushq $\sel
23316+ pushq $\off
23317+ lretq
23318+#endif
23319+ .endm
23320+
23321+ .macro pax_enter_kernel
23322+ pax_set_fptr_mask
23323+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23324+ call pax_enter_kernel
23325+#endif
23326+ .endm
23327+
23328+ .macro pax_exit_kernel
23329+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23330+ call pax_exit_kernel
23331+#endif
23332+
23333+ .endm
23334+
23335+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23336+ENTRY(pax_enter_kernel)
23337+ pushq %rdi
23338+
23339+#ifdef CONFIG_PARAVIRT
23340+ PV_SAVE_REGS(CLBR_RDI)
23341+#endif
23342+
23343+#ifdef CONFIG_PAX_KERNEXEC
23344+ GET_CR0_INTO_RDI
23345+ bts $16,%rdi
23346+ jnc 3f
23347+ mov %cs,%edi
23348+ cmp $__KERNEL_CS,%edi
23349+ jnz 2f
23350+1:
23351+#endif
23352+
23353+#ifdef CONFIG_PAX_MEMORY_UDEREF
23354+ 661: jmp 111f
23355+ .pushsection .altinstr_replacement, "a"
23356+ 662: ASM_NOP2
23357+ .popsection
23358+ .pushsection .altinstructions, "a"
23359+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23360+ .popsection
23361+ GET_CR3_INTO_RDI
23362+ cmp $0,%dil
23363+ jnz 112f
23364+ mov $__KERNEL_DS,%edi
23365+ mov %edi,%ss
23366+ jmp 111f
23367+112: cmp $1,%dil
23368+ jz 113f
23369+ ud2
23370+113: sub $4097,%rdi
23371+ bts $63,%rdi
23372+ SET_RDI_INTO_CR3
23373+ mov $__UDEREF_KERNEL_DS,%edi
23374+ mov %edi,%ss
23375+111:
23376+#endif
23377+
23378+#ifdef CONFIG_PARAVIRT
23379+ PV_RESTORE_REGS(CLBR_RDI)
23380+#endif
23381+
23382+ popq %rdi
23383+ pax_force_retaddr
23384+ retq
23385+
23386+#ifdef CONFIG_PAX_KERNEXEC
23387+2: ljmpq __KERNEL_CS,1b
23388+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23389+4: SET_RDI_INTO_CR0
23390+ jmp 1b
23391+#endif
23392+ENDPROC(pax_enter_kernel)
23393+
23394+ENTRY(pax_exit_kernel)
23395+ pushq %rdi
23396+
23397+#ifdef CONFIG_PARAVIRT
23398+ PV_SAVE_REGS(CLBR_RDI)
23399+#endif
23400+
23401+#ifdef CONFIG_PAX_KERNEXEC
23402+ mov %cs,%rdi
23403+ cmp $__KERNEXEC_KERNEL_CS,%edi
23404+ jz 2f
23405+ GET_CR0_INTO_RDI
23406+ bts $16,%rdi
23407+ jnc 4f
23408+1:
23409+#endif
23410+
23411+#ifdef CONFIG_PAX_MEMORY_UDEREF
23412+ 661: jmp 111f
23413+ .pushsection .altinstr_replacement, "a"
23414+ 662: ASM_NOP2
23415+ .popsection
23416+ .pushsection .altinstructions, "a"
23417+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23418+ .popsection
23419+ mov %ss,%edi
23420+ cmp $__UDEREF_KERNEL_DS,%edi
23421+ jnz 111f
23422+ GET_CR3_INTO_RDI
23423+ cmp $0,%dil
23424+ jz 112f
23425+ ud2
23426+112: add $4097,%rdi
23427+ bts $63,%rdi
23428+ SET_RDI_INTO_CR3
23429+ mov $__KERNEL_DS,%edi
23430+ mov %edi,%ss
23431+111:
23432+#endif
23433+
23434+#ifdef CONFIG_PARAVIRT
23435+ PV_RESTORE_REGS(CLBR_RDI);
23436+#endif
23437+
23438+ popq %rdi
23439+ pax_force_retaddr
23440+ retq
23441+
23442+#ifdef CONFIG_PAX_KERNEXEC
23443+2: GET_CR0_INTO_RDI
23444+ btr $16,%rdi
23445+ jnc 4f
23446+ ljmpq __KERNEL_CS,3f
23447+3: SET_RDI_INTO_CR0
23448+ jmp 1b
23449+4: ud2
23450+ jmp 4b
23451+#endif
23452+ENDPROC(pax_exit_kernel)
23453+#endif
23454+
23455+ .macro pax_enter_kernel_user
23456+ pax_set_fptr_mask
23457+#ifdef CONFIG_PAX_MEMORY_UDEREF
23458+ call pax_enter_kernel_user
23459+#endif
23460+ .endm
23461+
23462+ .macro pax_exit_kernel_user
23463+#ifdef CONFIG_PAX_MEMORY_UDEREF
23464+ call pax_exit_kernel_user
23465+#endif
23466+#ifdef CONFIG_PAX_RANDKSTACK
23467+ pushq %rax
23468+ pushq %r11
23469+ call pax_randomize_kstack
23470+ popq %r11
23471+ popq %rax
23472+#endif
23473+ .endm
23474+
23475+#ifdef CONFIG_PAX_MEMORY_UDEREF
23476+ENTRY(pax_enter_kernel_user)
23477+ pushq %rdi
23478+ pushq %rbx
23479+
23480+#ifdef CONFIG_PARAVIRT
23481+ PV_SAVE_REGS(CLBR_RDI)
23482+#endif
23483+
23484+ 661: jmp 111f
23485+ .pushsection .altinstr_replacement, "a"
23486+ 662: ASM_NOP2
23487+ .popsection
23488+ .pushsection .altinstructions, "a"
23489+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23490+ .popsection
23491+ GET_CR3_INTO_RDI
23492+ cmp $1,%dil
23493+ jnz 4f
23494+ sub $4097,%rdi
23495+ bts $63,%rdi
23496+ SET_RDI_INTO_CR3
23497+ jmp 3f
23498+111:
23499+
23500+ GET_CR3_INTO_RDI
23501+ mov %rdi,%rbx
23502+ add $__START_KERNEL_map,%rbx
23503+ sub phys_base(%rip),%rbx
23504+
23505+#ifdef CONFIG_PARAVIRT
23506+ cmpl $0, pv_info+PARAVIRT_enabled
23507+ jz 1f
23508+ pushq %rdi
23509+ i = 0
23510+ .rept USER_PGD_PTRS
23511+ mov i*8(%rbx),%rsi
23512+ mov $0,%sil
23513+ lea i*8(%rbx),%rdi
23514+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23515+ i = i + 1
23516+ .endr
23517+ popq %rdi
23518+ jmp 2f
23519+1:
23520+#endif
23521+
23522+ i = 0
23523+ .rept USER_PGD_PTRS
23524+ movb $0,i*8(%rbx)
23525+ i = i + 1
23526+ .endr
23527+
23528+2: SET_RDI_INTO_CR3
23529+
23530+#ifdef CONFIG_PAX_KERNEXEC
23531+ GET_CR0_INTO_RDI
23532+ bts $16,%rdi
23533+ SET_RDI_INTO_CR0
23534+#endif
23535+
23536+3:
23537+
23538+#ifdef CONFIG_PARAVIRT
23539+ PV_RESTORE_REGS(CLBR_RDI)
23540+#endif
23541+
23542+ popq %rbx
23543+ popq %rdi
23544+ pax_force_retaddr
23545+ retq
23546+4: ud2
23547+ENDPROC(pax_enter_kernel_user)
23548+
23549+ENTRY(pax_exit_kernel_user)
23550+ pushq %rdi
23551+ pushq %rbx
23552+
23553+#ifdef CONFIG_PARAVIRT
23554+ PV_SAVE_REGS(CLBR_RDI)
23555+#endif
23556+
23557+ GET_CR3_INTO_RDI
23558+ 661: jmp 1f
23559+ .pushsection .altinstr_replacement, "a"
23560+ 662: ASM_NOP2
23561+ .popsection
23562+ .pushsection .altinstructions, "a"
23563+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23564+ .popsection
23565+ cmp $0,%dil
23566+ jnz 3f
23567+ add $4097,%rdi
23568+ bts $63,%rdi
23569+ SET_RDI_INTO_CR3
23570+ jmp 2f
23571+1:
23572+
23573+ mov %rdi,%rbx
23574+
23575+#ifdef CONFIG_PAX_KERNEXEC
23576+ GET_CR0_INTO_RDI
23577+ btr $16,%rdi
23578+ jnc 3f
23579+ SET_RDI_INTO_CR0
23580+#endif
23581+
23582+ add $__START_KERNEL_map,%rbx
23583+ sub phys_base(%rip),%rbx
23584+
23585+#ifdef CONFIG_PARAVIRT
23586+ cmpl $0, pv_info+PARAVIRT_enabled
23587+ jz 1f
23588+ i = 0
23589+ .rept USER_PGD_PTRS
23590+ mov i*8(%rbx),%rsi
23591+ mov $0x67,%sil
23592+ lea i*8(%rbx),%rdi
23593+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23594+ i = i + 1
23595+ .endr
23596+ jmp 2f
23597+1:
23598+#endif
23599+
23600+ i = 0
23601+ .rept USER_PGD_PTRS
23602+ movb $0x67,i*8(%rbx)
23603+ i = i + 1
23604+ .endr
23605+2:
23606+
23607+#ifdef CONFIG_PARAVIRT
23608+ PV_RESTORE_REGS(CLBR_RDI)
23609+#endif
23610+
23611+ popq %rbx
23612+ popq %rdi
23613+ pax_force_retaddr
23614+ retq
23615+3: ud2
23616+ENDPROC(pax_exit_kernel_user)
23617+#endif
23618+
23619+ .macro pax_enter_kernel_nmi
23620+ pax_set_fptr_mask
23621+
23622+#ifdef CONFIG_PAX_KERNEXEC
23623+ GET_CR0_INTO_RDI
23624+ bts $16,%rdi
23625+ jc 110f
23626+ SET_RDI_INTO_CR0
23627+ or $2,%ebx
23628+110:
23629+#endif
23630+
23631+#ifdef CONFIG_PAX_MEMORY_UDEREF
23632+ 661: jmp 111f
23633+ .pushsection .altinstr_replacement, "a"
23634+ 662: ASM_NOP2
23635+ .popsection
23636+ .pushsection .altinstructions, "a"
23637+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23638+ .popsection
23639+ GET_CR3_INTO_RDI
23640+ cmp $0,%dil
23641+ jz 111f
23642+ sub $4097,%rdi
23643+ or $4,%ebx
23644+ bts $63,%rdi
23645+ SET_RDI_INTO_CR3
23646+ mov $__UDEREF_KERNEL_DS,%edi
23647+ mov %edi,%ss
23648+111:
23649+#endif
23650+ .endm
23651+
23652+ .macro pax_exit_kernel_nmi
23653+#ifdef CONFIG_PAX_KERNEXEC
23654+ btr $1,%ebx
23655+ jnc 110f
23656+ GET_CR0_INTO_RDI
23657+ btr $16,%rdi
23658+ SET_RDI_INTO_CR0
23659+110:
23660+#endif
23661+
23662+#ifdef CONFIG_PAX_MEMORY_UDEREF
23663+ btr $2,%ebx
23664+ jnc 111f
23665+ GET_CR3_INTO_RDI
23666+ add $4097,%rdi
23667+ bts $63,%rdi
23668+ SET_RDI_INTO_CR3
23669+ mov $__KERNEL_DS,%edi
23670+ mov %edi,%ss
23671+111:
23672+#endif
23673+ .endm
23674+
23675+ .macro pax_erase_kstack
23676+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23677+ call pax_erase_kstack
23678+#endif
23679+ .endm
23680+
23681+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23682+ENTRY(pax_erase_kstack)
23683+ pushq %rdi
23684+ pushq %rcx
23685+ pushq %rax
23686+ pushq %r11
23687+
23688+ GET_THREAD_INFO(%r11)
23689+ mov TI_lowest_stack(%r11), %rdi
23690+ mov $-0xBEEF, %rax
23691+ std
23692+
23693+1: mov %edi, %ecx
23694+ and $THREAD_SIZE_asm - 1, %ecx
23695+ shr $3, %ecx
23696+ repne scasq
23697+ jecxz 2f
23698+
23699+ cmp $2*8, %ecx
23700+ jc 2f
23701+
23702+ mov $2*8, %ecx
23703+ repe scasq
23704+ jecxz 2f
23705+ jne 1b
23706+
23707+2: cld
23708+ mov %esp, %ecx
23709+ sub %edi, %ecx
23710+
23711+ cmp $THREAD_SIZE_asm, %rcx
23712+ jb 3f
23713+ ud2
23714+3:
23715+
23716+ shr $3, %ecx
23717+ rep stosq
23718+
23719+ mov TI_task_thread_sp0(%r11), %rdi
23720+ sub $256, %rdi
23721+ mov %rdi, TI_lowest_stack(%r11)
23722+
23723+ popq %r11
23724+ popq %rax
23725+ popq %rcx
23726+ popq %rdi
23727+ pax_force_retaddr
23728+ ret
23729+ENDPROC(pax_erase_kstack)
23730+#endif
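Annotation: pax_erase_kstack above scans downward from TI_lowest_stack (std plus repne scasq) until it hits a long enough run of the $-0xBEEF poison qwords (one hit plus the 2*8 qwords verified by repe scasq), then refills everything from that point up to the live stack pointer and resets lowest_stack to just under sp0. A rough userspace model of the scan-and-fill; the function and parameter names are invented for illustration:

    #include <stdint.h>

    #define POISON ((uint64_t)-0xBEEFLL) /* the $-0xBEEF fill value used above */
    #define RUN    16                    /* qwords verified by the repe scasq pass */

    /* 'bottom' is the lowest address of the thread stack, 'lowest' the
     * deepest point it reached, 'rsp' the current stack pointer. */
    static void erase_kstack_model(uint64_t *bottom, uint64_t *lowest, uint64_t *rsp)
    {
        uint64_t *p = lowest;
        while (p - bottom > RUN) {
            int run = 0;
            while (run <= RUN && p[-1 - run] == POISON)
                run++;
            if (run > RUN)
                break;          /* reached the untouched, still-poisoned region */
            p -= run + 1;       /* skip the partial run and keep scanning down */
        }
        while (p < rsp)         /* stacks grow down, so rsp sits above 'lowest' */
            *p++ = POISON;
    }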
23731
23732 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23733 #ifdef CONFIG_TRACE_IRQFLAGS
23734@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
23735 .endm
23736
23737 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23738- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23739+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23740 jnc 1f
23741 TRACE_IRQS_ON_DEBUG
23742 1:
23743@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
23744 movq \tmp,R11+\offset(%rsp)
23745 .endm
23746
23747- .macro FAKE_STACK_FRAME child_rip
23748- /* push in order ss, rsp, eflags, cs, rip */
23749- xorl %eax, %eax
23750- pushq_cfi $__KERNEL_DS /* ss */
23751- /*CFI_REL_OFFSET ss,0*/
23752- pushq_cfi %rax /* rsp */
23753- CFI_REL_OFFSET rsp,0
23754- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23755- /*CFI_REL_OFFSET rflags,0*/
23756- pushq_cfi $__KERNEL_CS /* cs */
23757- /*CFI_REL_OFFSET cs,0*/
23758- pushq_cfi \child_rip /* rip */
23759- CFI_REL_OFFSET rip,0
23760- pushq_cfi %rax /* orig rax */
23761- .endm
23762-
23763- .macro UNFAKE_STACK_FRAME
23764- addq $8*6, %rsp
23765- CFI_ADJUST_CFA_OFFSET -(6*8)
23766- .endm
23767-
23768 /*
23769 * initial frame state for interrupts (and exceptions without error code)
23770 */
23771@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
23772 /* save partial stack frame */
23773 .macro SAVE_ARGS_IRQ
23774 cld
23775- /* start from rbp in pt_regs and jump over */
23776- movq_cfi rdi, (RDI-RBP)
23777- movq_cfi rsi, (RSI-RBP)
23778- movq_cfi rdx, (RDX-RBP)
23779- movq_cfi rcx, (RCX-RBP)
23780- movq_cfi rax, (RAX-RBP)
23781- movq_cfi r8, (R8-RBP)
23782- movq_cfi r9, (R9-RBP)
23783- movq_cfi r10, (R10-RBP)
23784- movq_cfi r11, (R11-RBP)
23785+ /* start from r15 in pt_regs and jump over */
23786+ movq_cfi rdi, RDI
23787+ movq_cfi rsi, RSI
23788+ movq_cfi rdx, RDX
23789+ movq_cfi rcx, RCX
23790+ movq_cfi rax, RAX
23791+ movq_cfi r8, R8
23792+ movq_cfi r9, R9
23793+ movq_cfi r10, R10
23794+ movq_cfi r11, R11
23795+ movq_cfi r12, R12
23796
23797 /* Save rbp so that we can unwind from get_irq_regs() */
23798- movq_cfi rbp, 0
23799+ movq_cfi rbp, RBP
23800
23801 /* Save previous stack value */
23802 movq %rsp, %rsi
23803
23804- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23805- testl $3, CS-RBP(%rsi)
23806+ movq %rsp,%rdi /* arg1 for handler */
23807+ testb $3, CS(%rsi)
23808 je 1f
23809 SWAPGS
23810 /*
23811@@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
23812 0x06 /* DW_OP_deref */, \
23813 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23814 0x22 /* DW_OP_plus */
23815+
23816+#ifdef CONFIG_PAX_MEMORY_UDEREF
23817+ testb $3, CS(%rdi)
23818+ jnz 1f
23819+ pax_enter_kernel
23820+ jmp 2f
23821+1: pax_enter_kernel_user
23822+2:
23823+#else
23824+ pax_enter_kernel
23825+#endif
23826+
23827 /* We entered an interrupt context - irqs are off: */
23828 TRACE_IRQS_OFF
23829 .endm
23830@@ -514,9 +939,52 @@ ENTRY(save_paranoid)
23831 js 1f /* negative -> in kernel */
23832 SWAPGS
23833 xorl %ebx,%ebx
23834-1: ret
23835+1:
23836+#ifdef CONFIG_PAX_MEMORY_UDEREF
23837+ testb $3, CS+8(%rsp)
23838+ jnz 1f
23839+ pax_enter_kernel
23840+ jmp 2f
23841+1: pax_enter_kernel_user
23842+2:
23843+#else
23844+ pax_enter_kernel
23845+#endif
23846+ pax_force_retaddr
23847+ ret
23848 CFI_ENDPROC
23849-END(save_paranoid)
23850+ENDPROC(save_paranoid)
23851+
23852+ENTRY(save_paranoid_nmi)
23853+ XCPT_FRAME 1 RDI+8
23854+ cld
23855+ movq_cfi rdi, RDI+8
23856+ movq_cfi rsi, RSI+8
23857+ movq_cfi rdx, RDX+8
23858+ movq_cfi rcx, RCX+8
23859+ movq_cfi rax, RAX+8
23860+ movq_cfi r8, R8+8
23861+ movq_cfi r9, R9+8
23862+ movq_cfi r10, R10+8
23863+ movq_cfi r11, R11+8
23864+ movq_cfi rbx, RBX+8
23865+ movq_cfi rbp, RBP+8
23866+ movq_cfi r12, R12+8
23867+ movq_cfi r13, R13+8
23868+ movq_cfi r14, R14+8
23869+ movq_cfi r15, R15+8
23870+ movl $1,%ebx
23871+ movl $MSR_GS_BASE,%ecx
23872+ rdmsr
23873+ testl %edx,%edx
23874+ js 1f /* negative -> in kernel */
23875+ SWAPGS
23876+ xorl %ebx,%ebx
23877+1: pax_enter_kernel_nmi
23878+ pax_force_retaddr
23879+ ret
23880+ CFI_ENDPROC
23881+ENDPROC(save_paranoid_nmi)
23882 .popsection
23883
23884 /*
23885@@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
23886
23887 RESTORE_REST
23888
23889- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23890+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23891 jz 1f
23892
23893 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23894@@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
23895 jmp ret_from_sys_call # go to the SYSRET fastpath
23896
23897 1:
23898- subq $REST_SKIP, %rsp # leave space for volatiles
23899- CFI_ADJUST_CFA_OFFSET REST_SKIP
23900 movq %rbp, %rdi
23901 call *%rbx
23902 movl $0, RAX(%rsp)
23903 RESTORE_REST
23904 jmp int_ret_from_sys_call
23905 CFI_ENDPROC
23906-END(ret_from_fork)
23907+ENDPROC(ret_from_fork)
23908
23909 /*
23910 * System call entry. Up to 6 arguments in registers are supported.
23911@@ -593,7 +1059,7 @@ END(ret_from_fork)
23912 ENTRY(system_call)
23913 CFI_STARTPROC simple
23914 CFI_SIGNAL_FRAME
23915- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23916+ CFI_DEF_CFA rsp,0
23917 CFI_REGISTER rip,rcx
23918 /*CFI_REGISTER rflags,r11*/
23919 SWAPGS_UNSAFE_STACK
23920@@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
23921
23922 movq %rsp,PER_CPU_VAR(old_rsp)
23923 movq PER_CPU_VAR(kernel_stack),%rsp
23924+ SAVE_ARGS 8*6,0
23925+ pax_enter_kernel_user
23926+
23927+#ifdef CONFIG_PAX_RANDKSTACK
23928+ pax_erase_kstack
23929+#endif
23930+
23931 /*
23932 * No need to follow this irqs off/on section - it's straight
23933 * and short:
23934 */
23935 ENABLE_INTERRUPTS(CLBR_NONE)
23936- SAVE_ARGS 8,0
23937 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23938 movq %rcx,RIP-ARGOFFSET(%rsp)
23939 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23940- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23941+ GET_THREAD_INFO(%rcx)
23942+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23943 jnz tracesys
23944 system_call_fastpath:
23945 #if __SYSCALL_MASK == ~0
23946@@ -639,10 +1112,13 @@ sysret_check:
23947 LOCKDEP_SYS_EXIT
23948 DISABLE_INTERRUPTS(CLBR_NONE)
23949 TRACE_IRQS_OFF
23950- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23951+ GET_THREAD_INFO(%rcx)
23952+ movl TI_flags(%rcx),%edx
23953 andl %edi,%edx
23954 jnz sysret_careful
23955 CFI_REMEMBER_STATE
23956+ pax_exit_kernel_user
23957+ pax_erase_kstack
23958 /*
23959 * sysretq will re-enable interrupts:
23960 */
23961@@ -701,6 +1177,9 @@ auditsys:
23962 movq %rax,%rsi /* 2nd arg: syscall number */
23963 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23964 call __audit_syscall_entry
23965+
23966+ pax_erase_kstack
23967+
23968 LOAD_ARGS 0 /* reload call-clobbered registers */
23969 jmp system_call_fastpath
23970
23971@@ -722,7 +1201,7 @@ sysret_audit:
23972 /* Do syscall tracing */
23973 tracesys:
23974 #ifdef CONFIG_AUDITSYSCALL
23975- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23976+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23977 jz auditsys
23978 #endif
23979 SAVE_REST
23980@@ -730,12 +1209,15 @@ tracesys:
23981 FIXUP_TOP_OF_STACK %rdi
23982 movq %rsp,%rdi
23983 call syscall_trace_enter
23984+
23985+ pax_erase_kstack
23986+
23987 /*
23988 * Reload arg registers from stack in case ptrace changed them.
23989 * We don't reload %rax because syscall_trace_enter() returned
23990 * the value it wants us to use in the table lookup.
23991 */
23992- LOAD_ARGS ARGOFFSET, 1
23993+ LOAD_ARGS 1
23994 RESTORE_REST
23995 #if __SYSCALL_MASK == ~0
23996 cmpq $__NR_syscall_max,%rax
23997@@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
23998 andl %edi,%edx
23999 jnz int_careful
24000 andl $~TS_COMPAT,TI_status(%rcx)
24001- jmp retint_swapgs
24002+ pax_exit_kernel_user
24003+ pax_erase_kstack
24004+ jmp retint_swapgs_pax
24005
24006 /* Either reschedule or signal or syscall exit tracking needed. */
24007 /* First do a reschedule test. */
24008@@ -811,7 +1295,7 @@ int_restore_rest:
24009 TRACE_IRQS_OFF
24010 jmp int_with_check
24011 CFI_ENDPROC
24012-END(system_call)
24013+ENDPROC(system_call)
24014
24015 .macro FORK_LIKE func
24016 ENTRY(stub_\func)
24017@@ -824,9 +1308,10 @@ ENTRY(stub_\func)
24018 DEFAULT_FRAME 0 8 /* offset 8: return address */
24019 call sys_\func
24020 RESTORE_TOP_OF_STACK %r11, 8
24021- ret $REST_SKIP /* pop extended registers */
24022+ pax_force_retaddr
24023+ ret
24024 CFI_ENDPROC
24025-END(stub_\func)
24026+ENDPROC(stub_\func)
24027 .endm
24028
24029 .macro FIXED_FRAME label,func
24030@@ -836,9 +1321,10 @@ ENTRY(\label)
24031 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24032 call \func
24033 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24034+ pax_force_retaddr
24035 ret
24036 CFI_ENDPROC
24037-END(\label)
24038+ENDPROC(\label)
24039 .endm
24040
24041 FORK_LIKE clone
24042@@ -846,19 +1332,6 @@ END(\label)
24043 FORK_LIKE vfork
24044 FIXED_FRAME stub_iopl, sys_iopl
24045
24046-ENTRY(ptregscall_common)
24047- DEFAULT_FRAME 1 8 /* offset 8: return address */
24048- RESTORE_TOP_OF_STACK %r11, 8
24049- movq_cfi_restore R15+8, r15
24050- movq_cfi_restore R14+8, r14
24051- movq_cfi_restore R13+8, r13
24052- movq_cfi_restore R12+8, r12
24053- movq_cfi_restore RBP+8, rbp
24054- movq_cfi_restore RBX+8, rbx
24055- ret $REST_SKIP /* pop extended registers */
24056- CFI_ENDPROC
24057-END(ptregscall_common)
24058-
24059 ENTRY(stub_execve)
24060 CFI_STARTPROC
24061 addq $8, %rsp
24062@@ -870,7 +1343,7 @@ ENTRY(stub_execve)
24063 RESTORE_REST
24064 jmp int_ret_from_sys_call
24065 CFI_ENDPROC
24066-END(stub_execve)
24067+ENDPROC(stub_execve)
24068
24069 /*
24070 * sigreturn is special because it needs to restore all registers on return.
24071@@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
24072 RESTORE_REST
24073 jmp int_ret_from_sys_call
24074 CFI_ENDPROC
24075-END(stub_rt_sigreturn)
24076+ENDPROC(stub_rt_sigreturn)
24077
24078 #ifdef CONFIG_X86_X32_ABI
24079 ENTRY(stub_x32_rt_sigreturn)
24080@@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
24081 RESTORE_REST
24082 jmp int_ret_from_sys_call
24083 CFI_ENDPROC
24084-END(stub_x32_rt_sigreturn)
24085+ENDPROC(stub_x32_rt_sigreturn)
24086
24087 ENTRY(stub_x32_execve)
24088 CFI_STARTPROC
24089@@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
24090 RESTORE_REST
24091 jmp int_ret_from_sys_call
24092 CFI_ENDPROC
24093-END(stub_x32_execve)
24094+ENDPROC(stub_x32_execve)
24095
24096 #endif
24097
24098@@ -952,7 +1425,7 @@ vector=vector+1
24099 2: jmp common_interrupt
24100 .endr
24101 CFI_ENDPROC
24102-END(irq_entries_start)
24103+ENDPROC(irq_entries_start)
24104
24105 .previous
24106 END(interrupt)
24107@@ -969,8 +1442,8 @@ END(interrupt)
24108 /* 0(%rsp): ~(interrupt number) */
24109 .macro interrupt func
24110 /* reserve pt_regs for scratch regs and rbp */
24111- subq $ORIG_RAX-RBP, %rsp
24112- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24113+ subq $ORIG_RAX, %rsp
24114+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24115 SAVE_ARGS_IRQ
24116 call \func
24117 .endm
24118@@ -997,14 +1470,14 @@ ret_from_intr:
24119
24120 /* Restore saved previous stack */
24121 popq %rsi
24122- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24123- leaq ARGOFFSET-RBP(%rsi), %rsp
24124+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24125+ movq %rsi, %rsp
24126 CFI_DEF_CFA_REGISTER rsp
24127- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24128+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24129
24130 exit_intr:
24131 GET_THREAD_INFO(%rcx)
24132- testl $3,CS-ARGOFFSET(%rsp)
24133+ testb $3,CS-ARGOFFSET(%rsp)
24134 je retint_kernel
24135
24136 /* Interrupt came from user space */
24137@@ -1026,12 +1499,16 @@ retint_swapgs: /* return to user-space */
24138 * The iretq could re-enable interrupts:
24139 */
24140 DISABLE_INTERRUPTS(CLBR_ANY)
24141+ pax_exit_kernel_user
24142+retint_swapgs_pax:
24143 TRACE_IRQS_IRETQ
24144 SWAPGS
24145 jmp restore_args
24146
24147 retint_restore_args: /* return to kernel space */
24148 DISABLE_INTERRUPTS(CLBR_ANY)
24149+ pax_exit_kernel
24150+ pax_force_retaddr (RIP-ARGOFFSET)
24151 /*
24152 * The iretq could re-enable interrupts:
24153 */
24154@@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
24155 #endif
24156
24157 CFI_ENDPROC
24158-END(common_interrupt)
24159+ENDPROC(common_interrupt)
24160 /*
24161 * End of kprobes section
24162 */
24163@@ -1130,7 +1607,7 @@ ENTRY(\sym)
24164 interrupt \do_sym
24165 jmp ret_from_intr
24166 CFI_ENDPROC
24167-END(\sym)
24168+ENDPROC(\sym)
24169 .endm
24170
24171 #ifdef CONFIG_TRACING
24172@@ -1218,7 +1695,7 @@ ENTRY(\sym)
24173 call \do_sym
24174 jmp error_exit /* %ebx: no swapgs flag */
24175 CFI_ENDPROC
24176-END(\sym)
24177+ENDPROC(\sym)
24178 .endm
24179
24180 .macro paranoidzeroentry sym do_sym
24181@@ -1236,10 +1713,10 @@ ENTRY(\sym)
24182 call \do_sym
24183 jmp paranoid_exit /* %ebx: no swapgs flag */
24184 CFI_ENDPROC
24185-END(\sym)
24186+ENDPROC(\sym)
24187 .endm
24188
24189-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24190+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24191 .macro paranoidzeroentry_ist sym do_sym ist
24192 ENTRY(\sym)
24193 INTR_FRAME
24194@@ -1252,12 +1729,18 @@ ENTRY(\sym)
24195 TRACE_IRQS_OFF_DEBUG
24196 movq %rsp,%rdi /* pt_regs pointer */
24197 xorl %esi,%esi /* no error code */
24198+#ifdef CONFIG_SMP
24199+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24200+ lea init_tss(%r13), %r13
24201+#else
24202+ lea init_tss(%rip), %r13
24203+#endif
24204 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
24205 call \do_sym
24206 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
24207 jmp paranoid_exit /* %ebx: no swapgs flag */
24208 CFI_ENDPROC
24209-END(\sym)
24210+ENDPROC(\sym)
24211 .endm
24212
24213 .macro errorentry sym do_sym
24214@@ -1275,7 +1758,7 @@ ENTRY(\sym)
24215 call \do_sym
24216 jmp error_exit /* %ebx: no swapgs flag */
24217 CFI_ENDPROC
24218-END(\sym)
24219+ENDPROC(\sym)
24220 .endm
24221
24222 #ifdef CONFIG_TRACING
24223@@ -1306,7 +1789,7 @@ ENTRY(\sym)
24224 call \do_sym
24225 jmp paranoid_exit /* %ebx: no swapgs flag */
24226 CFI_ENDPROC
24227-END(\sym)
24228+ENDPROC(\sym)
24229 .endm
24230
24231 zeroentry divide_error do_divide_error
24232@@ -1336,9 +1819,10 @@ gs_change:
24233 2: mfence /* workaround */
24234 SWAPGS
24235 popfq_cfi
24236+ pax_force_retaddr
24237 ret
24238 CFI_ENDPROC
24239-END(native_load_gs_index)
24240+ENDPROC(native_load_gs_index)
24241
24242 _ASM_EXTABLE(gs_change,bad_gs)
24243 .section .fixup,"ax"
24244@@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
24245 CFI_DEF_CFA_REGISTER rsp
24246 CFI_ADJUST_CFA_OFFSET -8
24247 decl PER_CPU_VAR(irq_count)
24248+ pax_force_retaddr
24249 ret
24250 CFI_ENDPROC
24251-END(do_softirq_own_stack)
24252+ENDPROC(do_softirq_own_stack)
24253
24254 #ifdef CONFIG_XEN
24255 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
24256@@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24257 decl PER_CPU_VAR(irq_count)
24258 jmp error_exit
24259 CFI_ENDPROC
24260-END(xen_do_hypervisor_callback)
24261+ENDPROC(xen_do_hypervisor_callback)
24262
24263 /*
24264 * Hypervisor uses this for application faults while it executes.
24265@@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
24266 SAVE_ALL
24267 jmp error_exit
24268 CFI_ENDPROC
24269-END(xen_failsafe_callback)
24270+ENDPROC(xen_failsafe_callback)
24271
24272 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24273 xen_hvm_callback_vector xen_evtchn_do_upcall
24274@@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
24275 DEFAULT_FRAME
24276 DISABLE_INTERRUPTS(CLBR_NONE)
24277 TRACE_IRQS_OFF_DEBUG
24278- testl %ebx,%ebx /* swapgs needed? */
24279+ testl $1,%ebx /* swapgs needed? */
24280 jnz paranoid_restore
24281- testl $3,CS(%rsp)
24282+ testb $3,CS(%rsp)
24283 jnz paranoid_userspace
24284+#ifdef CONFIG_PAX_MEMORY_UDEREF
24285+ pax_exit_kernel
24286+ TRACE_IRQS_IRETQ 0
24287+ SWAPGS_UNSAFE_STACK
24288+ RESTORE_ALL 8
24289+ pax_force_retaddr_bts
24290+ jmp irq_return
24291+#endif
24292 paranoid_swapgs:
24293+#ifdef CONFIG_PAX_MEMORY_UDEREF
24294+ pax_exit_kernel_user
24295+#else
24296+ pax_exit_kernel
24297+#endif
24298 TRACE_IRQS_IRETQ 0
24299 SWAPGS_UNSAFE_STACK
24300 RESTORE_ALL 8
24301 jmp irq_return
24302 paranoid_restore:
24303+ pax_exit_kernel
24304 TRACE_IRQS_IRETQ_DEBUG 0
24305 RESTORE_ALL 8
24306+ pax_force_retaddr_bts
24307 jmp irq_return
24308 paranoid_userspace:
24309 GET_THREAD_INFO(%rcx)
24310@@ -1557,7 +2057,7 @@ paranoid_schedule:
24311 TRACE_IRQS_OFF
24312 jmp paranoid_userspace
24313 CFI_ENDPROC
24314-END(paranoid_exit)
24315+ENDPROC(paranoid_exit)
24316
24317 /*
24318 * Exception entry point. This expects an error code/orig_rax on the stack.
24319@@ -1584,12 +2084,23 @@ ENTRY(error_entry)
24320 movq_cfi r14, R14+8
24321 movq_cfi r15, R15+8
24322 xorl %ebx,%ebx
24323- testl $3,CS+8(%rsp)
24324+ testb $3,CS+8(%rsp)
24325 je error_kernelspace
24326 error_swapgs:
24327 SWAPGS
24328 error_sti:
24329+#ifdef CONFIG_PAX_MEMORY_UDEREF
24330+ testb $3, CS+8(%rsp)
24331+ jnz 1f
24332+ pax_enter_kernel
24333+ jmp 2f
24334+1: pax_enter_kernel_user
24335+2:
24336+#else
24337+ pax_enter_kernel
24338+#endif
24339 TRACE_IRQS_OFF
24340+ pax_force_retaddr
24341 ret
24342
24343 /*
24344@@ -1616,7 +2127,7 @@ bstep_iret:
24345 movq %rcx,RIP+8(%rsp)
24346 jmp error_swapgs
24347 CFI_ENDPROC
24348-END(error_entry)
24349+ENDPROC(error_entry)
24350
24351
24352 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24353@@ -1627,7 +2138,7 @@ ENTRY(error_exit)
24354 DISABLE_INTERRUPTS(CLBR_NONE)
24355 TRACE_IRQS_OFF
24356 GET_THREAD_INFO(%rcx)
24357- testl %eax,%eax
24358+ testl $1,%eax
24359 jne retint_kernel
24360 LOCKDEP_SYS_EXIT_IRQ
24361 movl TI_flags(%rcx),%edx
24362@@ -1636,7 +2147,7 @@ ENTRY(error_exit)
24363 jnz retint_careful
24364 jmp retint_swapgs
24365 CFI_ENDPROC
24366-END(error_exit)
24367+ENDPROC(error_exit)
24368
24369 /*
24370 * Test if a given stack is an NMI stack or not.
24371@@ -1694,9 +2205,11 @@ ENTRY(nmi)
24372 * If %cs was not the kernel segment, then the NMI triggered in user
24373 * space, which means it is definitely not nested.
24374 */
24375+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24376+ je 1f
24377 cmpl $__KERNEL_CS, 16(%rsp)
24378 jne first_nmi
24379-
24380+1:
24381 /*
24382 * Check the special variable on the stack to see if NMIs are
24383 * executing.
24384@@ -1730,8 +2243,7 @@ nested_nmi:
24385
24386 1:
24387 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24388- leaq -1*8(%rsp), %rdx
24389- movq %rdx, %rsp
24390+ subq $8, %rsp
24391 CFI_ADJUST_CFA_OFFSET 1*8
24392 leaq -10*8(%rsp), %rdx
24393 pushq_cfi $__KERNEL_DS
24394@@ -1749,6 +2261,7 @@ nested_nmi_out:
24395 CFI_RESTORE rdx
24396
24397 /* No need to check faults here */
24398+# pax_force_retaddr_bts
24399 INTERRUPT_RETURN
24400
24401 CFI_RESTORE_STATE
24402@@ -1845,13 +2358,13 @@ end_repeat_nmi:
24403 subq $ORIG_RAX-R15, %rsp
24404 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24405 /*
24406- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24407+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24408 * as we should not be calling schedule in NMI context.
24409 * Even with normal interrupts enabled. An NMI should not be
24410 * setting NEED_RESCHED or anything that normal interrupts and
24411 * exceptions might do.
24412 */
24413- call save_paranoid
24414+ call save_paranoid_nmi
24415 DEFAULT_FRAME 0
24416
24417 /*
24418@@ -1861,9 +2374,9 @@ end_repeat_nmi:
24419 * NMI itself takes a page fault, the page fault that was preempted
24420 * will read the information from the NMI page fault and not the
24421 * origin fault. Save it off and restore it if it changes.
24422- * Use the r12 callee-saved register.
24423+ * Use the r13 callee-saved register.
24424 */
24425- movq %cr2, %r12
24426+ movq %cr2, %r13
24427
24428 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24429 movq %rsp,%rdi
24430@@ -1872,31 +2385,36 @@ end_repeat_nmi:
24431
24432 /* Did the NMI take a page fault? Restore cr2 if it did */
24433 movq %cr2, %rcx
24434- cmpq %rcx, %r12
24435+ cmpq %rcx, %r13
24436 je 1f
24437- movq %r12, %cr2
24438+ movq %r13, %cr2
24439 1:
24440
24441- testl %ebx,%ebx /* swapgs needed? */
24442+ testl $1,%ebx /* swapgs needed? */
24443 jnz nmi_restore
24444 nmi_swapgs:
24445 SWAPGS_UNSAFE_STACK
24446 nmi_restore:
24447+ pax_exit_kernel_nmi
24448 /* Pop the extra iret frame at once */
24449 RESTORE_ALL 6*8
24450+ testb $3, 8(%rsp)
24451+ jnz 1f
24452+ pax_force_retaddr_bts
24453+1:
24454
24455 /* Clear the NMI executing stack variable */
24456 movq $0, 5*8(%rsp)
24457 jmp irq_return
24458 CFI_ENDPROC
24459-END(nmi)
24460+ENDPROC(nmi)
24461
24462 ENTRY(ignore_sysret)
24463 CFI_STARTPROC
24464 mov $-ENOSYS,%eax
24465 sysret
24466 CFI_ENDPROC
24467-END(ignore_sysret)
24468+ENDPROC(ignore_sysret)
24469
24470 /*
24471 * End of kprobes section
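Annotation: the entry_64.S changes above thread pax_enter_kernel*/pax_exit_kernel* through every kernel entry and exit path (syscall, interrupt, paranoid and NMI). On the non-PCID UDEREF path, pax_enter_kernel_user walks the USER_PGD_PTRS low PGD slots and clears their low flag byte (movb $0,i*8(%rbx)), making userland non-present while the kernel runs; pax_exit_kernel_user writes back 0x67, i.e. PRESENT|RW|USER|ACCESSED|DIRTY. A minimal C model mirroring those two stores; USER_PGD_PTRS = 256 is an assumed value (the low half of a 512-entry PGD):

    #include <stdint.h>

    #define USER_PGD_PTRS 256   /* assumed: userland half of the 512-entry PGD */

    /* Kernel entry: zero the low flag byte (bits 0-7 on little-endian x86),
     * which clears _PAGE_PRESENT and friends, ~ movb $0, i*8(%rbx). */
    static void uderef_enter(uint64_t *pgd)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&pgd[i])[0] = 0x00;
    }

    /* Kernel exit: restore 0x67 = PRESENT|RW|USER|ACCESSED|DIRTY,
     * ~ movb $0x67, i*8(%rbx). */
    static void uderef_exit(uint64_t *pgd)
    {
        for (int i = 0; i < USER_PGD_PTRS; i++)
            ((uint8_t *)&pgd[i])[0] = 0x67;
    }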
24472diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24473index 1ffc32d..e52c745 100644
24474--- a/arch/x86/kernel/ftrace.c
24475+++ b/arch/x86/kernel/ftrace.c
24476@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24477 {
24478 unsigned char replaced[MCOUNT_INSN_SIZE];
24479
24480+ ip = ktla_ktva(ip);
24481+
24482 /*
24483 * Note: Due to modules and __init, code can
24484 * disappear and change, we need to protect against faulting
24485@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24486 unsigned char old[MCOUNT_INSN_SIZE];
24487 int ret;
24488
24489- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24490+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24491
24492 ftrace_update_func = ip;
24493 /* Make sure the breakpoints see the ftrace_update_func update */
24494@@ -306,7 +308,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
24495 * kernel identity mapping to modify code.
24496 */
24497 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24498- ip = (unsigned long)__va(__pa_symbol(ip));
24499+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24500
24501 return probe_kernel_write((void *)ip, val, size);
24502 }
24503@@ -316,7 +318,7 @@ static int add_break(unsigned long ip, const char *old)
24504 unsigned char replaced[MCOUNT_INSN_SIZE];
24505 unsigned char brk = BREAKPOINT_INSTRUCTION;
24506
24507- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24508+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24509 return -EFAULT;
24510
24511 /* Make sure it is what we expect it to be */
24512@@ -664,7 +666,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
24513 return ret;
24514
24515 fail_update:
24516- probe_kernel_write((void *)ip, &old_code[0], 1);
24517+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
24518 goto out;
24519 }
24520
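Annotation: every ftrace.c hunk above routes reads and writes of kernel text through ktla_ktva(). Under KERNEXEC the kernel executes text through a dedicated read-only mapping, and ktla_ktva converts a kernel text linear address into the alias that may actually be read or patched. The exact offset is configuration-dependent, so this sketch only shows the shape, with a hypothetical delta:

    /* Sketch only: the real ktla_ktva() offset depends on the KERNEXEC
     * layout; 0 stands in for the non-KERNEXEC case. */
    #define KERNEXEC_TEXT_DELTA 0x0UL   /* hypothetical */

    static inline unsigned long ktla_ktva(unsigned long addr)
    {
        return addr + KERNEXEC_TEXT_DELTA;
    }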
24521diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24522index 85126cc..1bbce17 100644
24523--- a/arch/x86/kernel/head64.c
24524+++ b/arch/x86/kernel/head64.c
24525@@ -67,12 +67,12 @@ again:
24526 pgd = *pgd_p;
24527
24528 /*
24529- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24530- * critical -- __PAGE_OFFSET would point us back into the dynamic
24531+ * The use of __early_va rather than __va here is critical:
24532+ * __va would point us back into the dynamic
24533 * range and we might end up looping forever...
24534 */
24535 if (pgd)
24536- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24537+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24538 else {
24539 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24540 reset_early_page_tables();
24541@@ -82,13 +82,13 @@ again:
24542 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24543 for (i = 0; i < PTRS_PER_PUD; i++)
24544 pud_p[i] = 0;
24545- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24546+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24547 }
24548 pud_p += pud_index(address);
24549 pud = *pud_p;
24550
24551 if (pud)
24552- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24553+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24554 else {
24555 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24556 reset_early_page_tables();
24557@@ -98,7 +98,7 @@ again:
24558 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24559 for (i = 0; i < PTRS_PER_PMD; i++)
24560 pmd_p[i] = 0;
24561- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24562+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24563 }
24564 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24565 pmd_p[pmd_index(address)] = pmd;
24566@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
24567 if (console_loglevel == 10)
24568 early_printk("Kernel alive\n");
24569
24570- clear_page(init_level4_pgt);
24571 /* set init_level4_pgt kernel high mapping*/
24572 init_level4_pgt[511] = early_level4_pgt[511];
24573
24574diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24575index f36bd42..56ee1534 100644
24576--- a/arch/x86/kernel/head_32.S
24577+++ b/arch/x86/kernel/head_32.S
24578@@ -26,6 +26,12 @@
24579 /* Physical address */
24580 #define pa(X) ((X) - __PAGE_OFFSET)
24581
24582+#ifdef CONFIG_PAX_KERNEXEC
24583+#define ta(X) (X)
24584+#else
24585+#define ta(X) ((X) - __PAGE_OFFSET)
24586+#endif
24587+
24588 /*
24589 * References to members of the new_cpu_data structure.
24590 */
24591@@ -55,11 +61,7 @@
24592 * and small than max_low_pfn, otherwise will waste some page table entries
24593 */
24594
24595-#if PTRS_PER_PMD > 1
24596-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24597-#else
24598-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24599-#endif
24600+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24601
24602 /* Number of possible pages in the lowmem region */
24603 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24604@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24605 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24606
24607 /*
24608+ * Real beginning of normal "text" segment
24609+ */
24610+ENTRY(stext)
24611+ENTRY(_stext)
24612+
24613+/*
24614 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24615 * %esi points to the real-mode code as a 32-bit pointer.
24616 * CS and DS must be 4 GB flat segments, but we don't depend on
24617@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24618 * can.
24619 */
24620 __HEAD
24621+
24622+#ifdef CONFIG_PAX_KERNEXEC
24623+ jmp startup_32
24624+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24625+.fill PAGE_SIZE-5,1,0xcc
24626+#endif
24627+
24628 ENTRY(startup_32)
24629 movl pa(stack_start),%ecx
24630
24631@@ -106,6 +121,59 @@ ENTRY(startup_32)
24632 2:
24633 leal -__PAGE_OFFSET(%ecx),%esp
24634
24635+#ifdef CONFIG_SMP
24636+ movl $pa(cpu_gdt_table),%edi
24637+ movl $__per_cpu_load,%eax
24638+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24639+ rorl $16,%eax
24640+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24641+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24642+ movl $__per_cpu_end - 1,%eax
24643+ subl $__per_cpu_start,%eax
24644+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24645+#endif
24646+
24647+#ifdef CONFIG_PAX_MEMORY_UDEREF
24648+ movl $NR_CPUS,%ecx
24649+ movl $pa(cpu_gdt_table),%edi
24650+1:
24651+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24652+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24653+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24654+ addl $PAGE_SIZE_asm,%edi
24655+ loop 1b
24656+#endif
24657+
24658+#ifdef CONFIG_PAX_KERNEXEC
24659+ movl $pa(boot_gdt),%edi
24660+ movl $__LOAD_PHYSICAL_ADDR,%eax
24661+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24662+ rorl $16,%eax
24663+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24664+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24665+ rorl $16,%eax
24666+
24667+ ljmp $(__BOOT_CS),$1f
24668+1:
24669+
24670+ movl $NR_CPUS,%ecx
24671+ movl $pa(cpu_gdt_table),%edi
24672+ addl $__PAGE_OFFSET,%eax
24673+1:
24674+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24675+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24676+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24677+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24678+ rorl $16,%eax
24679+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24680+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24681+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24682+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24683+ rorl $16,%eax
24684+ addl $PAGE_SIZE_asm,%edi
24685+ loop 1b
24686+#endif
24687+
24688 /*
24689 * Clear BSS first so that there are no surprises...
24690 */
24691@@ -201,8 +269,11 @@ ENTRY(startup_32)
24692 movl %eax, pa(max_pfn_mapped)
24693
24694 /* Do early initialization of the fixmap area */
24695- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24696- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24697+#ifdef CONFIG_COMPAT_VDSO
24698+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24699+#else
24700+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24701+#endif
24702 #else /* Not PAE */
24703
24704 page_pde_offset = (__PAGE_OFFSET >> 20);
24705@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24706 movl %eax, pa(max_pfn_mapped)
24707
24708 /* Do early initialization of the fixmap area */
24709- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24710- movl %eax,pa(initial_page_table+0xffc)
24711+#ifdef CONFIG_COMPAT_VDSO
24712+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24713+#else
24714+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24715+#endif
24716 #endif
24717
24718 #ifdef CONFIG_PARAVIRT
24719@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24720 cmpl $num_subarch_entries, %eax
24721 jae bad_subarch
24722
24723- movl pa(subarch_entries)(,%eax,4), %eax
24724- subl $__PAGE_OFFSET, %eax
24725- jmp *%eax
24726+ jmp *pa(subarch_entries)(,%eax,4)
24727
24728 bad_subarch:
24729 WEAK(lguest_entry)
24730@@ -261,10 +333,10 @@ WEAK(xen_entry)
24731 __INITDATA
24732
24733 subarch_entries:
24734- .long default_entry /* normal x86/PC */
24735- .long lguest_entry /* lguest hypervisor */
24736- .long xen_entry /* Xen hypervisor */
24737- .long default_entry /* Moorestown MID */
24738+ .long ta(default_entry) /* normal x86/PC */
24739+ .long ta(lguest_entry) /* lguest hypervisor */
24740+ .long ta(xen_entry) /* Xen hypervisor */
24741+ .long ta(default_entry) /* Moorestown MID */
24742 num_subarch_entries = (. - subarch_entries) / 4
24743 .previous
24744 #else
24745@@ -354,6 +426,7 @@ default_entry:
24746 movl pa(mmu_cr4_features),%eax
24747 movl %eax,%cr4
24748
24749+#ifdef CONFIG_X86_PAE
24750 testb $X86_CR4_PAE, %al # check if PAE is enabled
24751 jz enable_paging
24752
24753@@ -382,6 +455,9 @@ default_entry:
24754 /* Make changes effective */
24755 wrmsr
24756
24757+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24758+#endif
24759+
24760 enable_paging:
24761
24762 /*
24763@@ -449,14 +525,20 @@ is486:
24764 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24765 movl %eax,%ss # after changing gdt.
24766
24767- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24768+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24769 movl %eax,%ds
24770 movl %eax,%es
24771
24772 movl $(__KERNEL_PERCPU), %eax
24773 movl %eax,%fs # set this cpu's percpu
24774
24775+#ifdef CONFIG_CC_STACKPROTECTOR
24776 movl $(__KERNEL_STACK_CANARY),%eax
24777+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24778+ movl $(__USER_DS),%eax
24779+#else
24780+ xorl %eax,%eax
24781+#endif
24782 movl %eax,%gs
24783
24784 xorl %eax,%eax # Clear LDT
24785@@ -512,8 +594,11 @@ setup_once:
24786 * relocation. Manually set base address in stack canary
24787 * segment descriptor.
24788 */
24789- movl $gdt_page,%eax
24790+ movl $cpu_gdt_table,%eax
24791 movl $stack_canary,%ecx
24792+#ifdef CONFIG_SMP
24793+ addl $__per_cpu_load,%ecx
24794+#endif
24795 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24796 shrl $16, %ecx
24797 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24798@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24799 cmpl $2,(%esp) # X86_TRAP_NMI
24800 je is_nmi # Ignore NMI
24801
24802- cmpl $2,%ss:early_recursion_flag
24803+ cmpl $1,%ss:early_recursion_flag
24804 je hlt_loop
24805 incl %ss:early_recursion_flag
24806
24807@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24808 pushl (20+6*4)(%esp) /* trapno */
24809 pushl $fault_msg
24810 call printk
24811-#endif
24812 call dump_stack
24813+#endif
24814 hlt_loop:
24815 hlt
24816 jmp hlt_loop
24817@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24818 /* This is the default interrupt "handler" :-) */
24819 ALIGN
24820 ignore_int:
24821- cld
24822 #ifdef CONFIG_PRINTK
24823+ cmpl $2,%ss:early_recursion_flag
24824+ je hlt_loop
24825+ incl %ss:early_recursion_flag
24826+ cld
24827 pushl %eax
24828 pushl %ecx
24829 pushl %edx
24830@@ -617,9 +705,6 @@ ignore_int:
24831 movl $(__KERNEL_DS),%eax
24832 movl %eax,%ds
24833 movl %eax,%es
24834- cmpl $2,early_recursion_flag
24835- je hlt_loop
24836- incl early_recursion_flag
24837 pushl 16(%esp)
24838 pushl 24(%esp)
24839 pushl 32(%esp)
24840@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24841 /*
24842 * BSS section
24843 */
24844-__PAGE_ALIGNED_BSS
24845- .align PAGE_SIZE
24846 #ifdef CONFIG_X86_PAE
24847+.section .initial_pg_pmd,"a",@progbits
24848 initial_pg_pmd:
24849 .fill 1024*KPMDS,4,0
24850 #else
24851+.section .initial_page_table,"a",@progbits
24852 ENTRY(initial_page_table)
24853 .fill 1024,4,0
24854 #endif
24855+.section .initial_pg_fixmap,"a",@progbits
24856 initial_pg_fixmap:
24857 .fill 1024,4,0
24858+.section .empty_zero_page,"a",@progbits
24859 ENTRY(empty_zero_page)
24860 .fill 4096,1,0
24861+.section .swapper_pg_dir,"a",@progbits
24862 ENTRY(swapper_pg_dir)
24863+#ifdef CONFIG_X86_PAE
24864+ .fill 4,8,0
24865+#else
24866 .fill 1024,4,0
24867+#endif
24868
24869 /*
24870 * This starts the data section.
24871 */
24872 #ifdef CONFIG_X86_PAE
24873-__PAGE_ALIGNED_DATA
24874- /* Page-aligned for the benefit of paravirt? */
24875- .align PAGE_SIZE
24876+.section .initial_page_table,"a",@progbits
24877 ENTRY(initial_page_table)
24878 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24879 # if KPMDS == 3
24880@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24881 # error "Kernel PMDs should be 1, 2 or 3"
24882 # endif
24883 .align PAGE_SIZE /* needs to be page-sized too */
24884+
24885+#ifdef CONFIG_PAX_PER_CPU_PGD
24886+ENTRY(cpu_pgd)
24887+ .rept 2*NR_CPUS
24888+ .fill 4,8,0
24889+ .endr
24890+#endif
24891+
24892 #endif
24893
24894 .data
24895 .balign 4
24896 ENTRY(stack_start)
24897- .long init_thread_union+THREAD_SIZE
24898+ .long init_thread_union+THREAD_SIZE-8
24899
24900 __INITRODATA
24901 int_msg:
24902@@ -727,7 +825,7 @@ fault_msg:
24903 * segment size, and 32-bit linear address value:
24904 */
24905
24906- .data
24907+.section .rodata,"a",@progbits
24908 .globl boot_gdt_descr
24909 .globl idt_descr
24910
24911@@ -736,7 +834,7 @@ fault_msg:
24912 .word 0 # 32 bit align gdt_desc.address
24913 boot_gdt_descr:
24914 .word __BOOT_DS+7
24915- .long boot_gdt - __PAGE_OFFSET
24916+ .long pa(boot_gdt)
24917
24918 .word 0 # 32-bit align idt_desc.address
24919 idt_descr:
24920@@ -747,7 +845,7 @@ idt_descr:
24921 .word 0 # 32 bit align gdt_desc.address
24922 ENTRY(early_gdt_descr)
24923 .word GDT_ENTRIES*8-1
24924- .long gdt_page /* Overwritten for secondary CPUs */
24925+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24926
24927 /*
24928 * The boot_gdt must mirror the equivalent in setup.S and is
24929@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24930 .align L1_CACHE_BYTES
24931 ENTRY(boot_gdt)
24932 .fill GDT_ENTRY_BOOT_CS,8,0
24933- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24934- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24935+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24936+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24937+
24938+ .align PAGE_SIZE_asm
24939+ENTRY(cpu_gdt_table)
24940+ .rept NR_CPUS
24941+ .quad 0x0000000000000000 /* NULL descriptor */
24942+ .quad 0x0000000000000000 /* 0x0b reserved */
24943+ .quad 0x0000000000000000 /* 0x13 reserved */
24944+ .quad 0x0000000000000000 /* 0x1b reserved */
24945+
24946+#ifdef CONFIG_PAX_KERNEXEC
24947+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24948+#else
24949+ .quad 0x0000000000000000 /* 0x20 unused */
24950+#endif
24951+
24952+ .quad 0x0000000000000000 /* 0x28 unused */
24953+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24954+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24955+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24956+ .quad 0x0000000000000000 /* 0x4b reserved */
24957+ .quad 0x0000000000000000 /* 0x53 reserved */
24958+ .quad 0x0000000000000000 /* 0x5b reserved */
24959+
24960+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24961+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24962+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24963+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24964+
24965+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24966+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24967+
24968+ /*
24969+ * Segments used for calling PnP BIOS have byte granularity.
24970+ * The code segments and data segments have fixed 64k limits,
24971+ * the transfer segment sizes are set at run time.
24972+ */
24973+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24974+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24975+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24976+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24977+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24978+
24979+ /*
24980+ * The APM segments have byte granularity and their bases
24981+ * are set at run time. All have 64k limits.
24982+ */
24983+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24984+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24985+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24986+
24987+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
24988+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24989+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24990+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24991+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24992+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24993+
24994+ /* Be sure this is zeroed to avoid false validations in Xen */
24995+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24996+ .endr
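Annotation: the per-CPU cpu_gdt_table added above is built from hand-encoded 8-byte descriptors such as 0x00cf9b000000ffff (kernel 4 GB code) and 0x00cf93000000ffff (kernel 4 GB data); note the boot_gdt entries change 0x..9a/0x..92 to 0x..9b/0x..93, presetting the accessed bit, presumably so a read-only GDT never needs a hardware write. The quads unpack per the standard x86 descriptor layout; a small decoder makes them checkable:

    #include <stdint.h>

    struct seg_desc {
        uint32_t base;
        uint64_t limit;     /* in bytes once granularity is applied */
        uint8_t  access;    /* e.g. 0x9b = present|ring0|code|accessed */
    };

    static struct seg_desc decode_desc(uint64_t d)
    {
        struct seg_desc s;
        uint64_t lim = (d & 0xffff) | ((d >> 32) & 0xf0000);     /* limit[0:19] */
        s.base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
        s.access = (uint8_t)(d >> 40);
        s.limit  = (d >> 55) & 1 ? (lim << 12) | 0xfff : lim;    /* G: 4 KiB units */
        return s;
    }
    /* decode_desc(0x00cf9b000000ffff) -> base 0, limit 0xffffffff, access 0x9b */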
24997diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24998index a468c0a..c7dec74 100644
24999--- a/arch/x86/kernel/head_64.S
25000+++ b/arch/x86/kernel/head_64.S
25001@@ -20,6 +20,8 @@
25002 #include <asm/processor-flags.h>
25003 #include <asm/percpu.h>
25004 #include <asm/nops.h>
25005+#include <asm/cpufeature.h>
25006+#include <asm/alternative-asm.h>
25007
25008 #ifdef CONFIG_PARAVIRT
25009 #include <asm/asm-offsets.h>
25010@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25011 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25012 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25013 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25014+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25015+L3_VMALLOC_START = pud_index(VMALLOC_START)
25016+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25017+L3_VMALLOC_END = pud_index(VMALLOC_END)
25018+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25019+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25020
25021 .text
25022 __HEAD
25023@@ -89,11 +97,24 @@ startup_64:
25024 * Fixup the physical addresses in the page table
25025 */
25026 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25027+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25028+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25029+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25030+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25031+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25032
25033- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25034- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25035+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25036+#ifndef CONFIG_XEN
25037+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25038+#endif
25039+
25040+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25041+
25042+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25043+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25044
25045 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25046+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25047
25048 /*
25049 * Set up the identity mapping for the switchover. These
25050@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
25051 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25052 1:
25053
25054- /* Enable PAE mode and PGE */
25055- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25056+ /* Enable PAE mode and PSE/PGE */
25057+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25058 movq %rcx, %cr4
25059
25060 /* Setup early boot stage 4 level pagetables. */
25061@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
25062 movl $MSR_EFER, %ecx
25063 rdmsr
25064 btsl $_EFER_SCE, %eax /* Enable System Call */
25065- btl $20,%edi /* No Execute supported? */
25066+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25067 jnc 1f
25068 btsl $_EFER_NX, %eax
25069 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25070+#ifndef CONFIG_EFI
25071+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25072+#endif
25073+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25074+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25075+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25076+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25077+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25078+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25079 1: wrmsr /* Make changes effective */
25080
25081 /* Setup cr0 */
25082@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
25083 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25084 * address given in m16:64.
25085 */
25086+ pax_set_fptr_mask
25087 movq initial_code(%rip),%rax
25088 pushq $0 # fake return address to stop unwinder
25089 pushq $__KERNEL_CS # set correct cs
25090@@ -313,7 +344,7 @@ ENDPROC(start_cpu0)
25091 .quad INIT_PER_CPU_VAR(irq_stack_union)
25092
25093 GLOBAL(stack_start)
25094- .quad init_thread_union+THREAD_SIZE-8
25095+ .quad init_thread_union+THREAD_SIZE-16
25096 .word 0
25097 __FINITDATA
25098
25099@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
25100 call dump_stack
25101 #ifdef CONFIG_KALLSYMS
25102 leaq early_idt_ripmsg(%rip),%rdi
25103- movq 40(%rsp),%rsi # %rip again
25104+ movq 88(%rsp),%rsi # %rip again
25105 call __print_symbol
25106 #endif
25107 #endif /* EARLY_PRINTK */
25108@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
25109 early_recursion_flag:
25110 .long 0
25111
25112+ .section .rodata,"a",@progbits
25113 #ifdef CONFIG_EARLY_PRINTK
25114 early_idt_msg:
25115 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25116@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
25117 NEXT_PAGE(early_dynamic_pgts)
25118 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25119
25120- .data
25121+ .section .rodata,"a",@progbits
25122
25123-#ifndef CONFIG_XEN
25124 NEXT_PAGE(init_level4_pgt)
25125- .fill 512,8,0
25126-#else
25127-NEXT_PAGE(init_level4_pgt)
25128- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25129 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25130 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25131+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25132+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25133+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25134+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25135+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25136+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25137 .org init_level4_pgt + L4_START_KERNEL*8, 0
25138 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25139 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25140
25141+#ifdef CONFIG_PAX_PER_CPU_PGD
25142+NEXT_PAGE(cpu_pgd)
25143+ .rept 2*NR_CPUS
25144+ .fill 512,8,0
25145+ .endr
25146+#endif
25147+
25148 NEXT_PAGE(level3_ident_pgt)
25149 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25150+#ifdef CONFIG_XEN
25151 .fill 511, 8, 0
25152+#else
25153+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25154+ .fill 510,8,0
25155+#endif
25156+
25157+NEXT_PAGE(level3_vmalloc_start_pgt)
25158+ .fill 512,8,0
25159+
25160+NEXT_PAGE(level3_vmalloc_end_pgt)
25161+ .fill 512,8,0
25162+
25163+NEXT_PAGE(level3_vmemmap_pgt)
25164+ .fill L3_VMEMMAP_START,8,0
25165+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25166+
25167 NEXT_PAGE(level2_ident_pgt)
25168- /* Since I easily can, map the first 1G.
25169+ /* Since I easily can, map the first 2G.
25170 * Don't set NX because code runs from these pages.
25171 */
25172- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25173-#endif
25174+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25175
25176 NEXT_PAGE(level3_kernel_pgt)
25177 .fill L3_START_KERNEL,8,0
25178@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
25179 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25180 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25181
25182+NEXT_PAGE(level2_vmemmap_pgt)
25183+ .fill 512,8,0
25184+
25185 NEXT_PAGE(level2_kernel_pgt)
25186 /*
25187 * 512 MB kernel mapping. We spend a full page on this pagetable
25188@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
25189 NEXT_PAGE(level2_fixmap_pgt)
25190 .fill 506,8,0
25191 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25192- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25193- .fill 5,8,0
25194+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25195+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25196+ .fill 4,8,0
25197
25198 NEXT_PAGE(level1_fixmap_pgt)
25199 .fill 512,8,0
25200
25201+NEXT_PAGE(level1_vsyscall_pgt)
25202+ .fill 512,8,0
25203+
25204 #undef PMDS
25205
25206- .data
25207+ .align PAGE_SIZE
25208+ENTRY(cpu_gdt_table)
25209+ .rept NR_CPUS
25210+ .quad 0x0000000000000000 /* NULL descriptor */
25211+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25212+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25213+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25214+ .quad 0x00cffb000000ffff /* __USER32_CS */
25215+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25216+ .quad 0x00affb000000ffff /* __USER_CS */
25217+
25218+#ifdef CONFIG_PAX_KERNEXEC
25219+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25220+#else
25221+ .quad 0x0 /* unused */
25222+#endif
25223+
25224+ .quad 0,0 /* TSS */
25225+ .quad 0,0 /* LDT */
25226+ .quad 0,0,0 /* three TLS descriptors */
25227+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25228+ /* asm/segment.h:GDT_ENTRIES must match this */
25229+
25230+#ifdef CONFIG_PAX_MEMORY_UDEREF
25231+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25232+#else
25233+ .quad 0x0 /* unused */
25234+#endif
25235+
25236+ /* zero the remaining page */
25237+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25238+ .endr
25239+
25240 .align 16
25241 .globl early_gdt_descr
25242 early_gdt_descr:
25243 .word GDT_ENTRIES*8-1
25244 early_gdt_descr_base:
25245- .quad INIT_PER_CPU_VAR(gdt_page)
25246+ .quad cpu_gdt_table
25247
25248 ENTRY(phys_base)
25249 /* This must match the first entry in level2_kernel_pgt */
25250 .quad 0x0000000000000000
25251
25252 #include "../../x86/xen/xen-head.S"
25253-
25254- __PAGE_ALIGNED_BSS
25255+
25256+ .section .rodata,"a",@progbits
25257 NEXT_PAGE(empty_zero_page)
25258 .skip PAGE_SIZE
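Annotation: head_64.S now pre-creates separate L4 slots for the vmalloc and vmemmap regions (L4_VMALLOC_START = pgd_index(VMALLOC_START), and so on) and, when the CPU reports NX, sets bit 63 on every non-text top-level entry as well as in __supported_pte_mask. The two helpers involved are plain bit arithmetic on 4-level x86_64 paging:

    #include <stdint.h>

    #define _PAGE_BIT_NX 63   /* x86_64 no-execute bit in a page-table entry */

    /* pgd_index() on 4-level x86_64: bits 47..39 of the virtual address. */
    static inline unsigned pgd_index(uint64_t vaddr)
    {
        return (vaddr >> 39) & 0x1ff;
    }

    /* Model of the btsq $_PAGE_BIT_NX,... lines: mark an entry no-execute. */
    static inline uint64_t pgd_set_nx(uint64_t entry)
    {
        return entry | (1ULL << _PAGE_BIT_NX);
    }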
25259diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25260index 05fd74f..c3548b1 100644
25261--- a/arch/x86/kernel/i386_ksyms_32.c
25262+++ b/arch/x86/kernel/i386_ksyms_32.c
25263@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25264 EXPORT_SYMBOL(cmpxchg8b_emu);
25265 #endif
25266
25267+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25268+
25269 /* Networking helper routines. */
25270 EXPORT_SYMBOL(csum_partial_copy_generic);
25271+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25272+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25273
25274 EXPORT_SYMBOL(__get_user_1);
25275 EXPORT_SYMBOL(__get_user_2);
25276@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25277 EXPORT_SYMBOL(___preempt_schedule_context);
25278 #endif
25279 #endif
25280+
25281+#ifdef CONFIG_PAX_KERNEXEC
25282+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25283+#endif
25284+
25285+#ifdef CONFIG_PAX_PER_CPU_PGD
25286+EXPORT_SYMBOL(cpu_pgd);
25287+#endif
25288diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25289index d5dd808..b6432cf 100644
25290--- a/arch/x86/kernel/i387.c
25291+++ b/arch/x86/kernel/i387.c
25292@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25293 static inline bool interrupted_user_mode(void)
25294 {
25295 struct pt_regs *regs = get_irq_regs();
25296- return regs && user_mode_vm(regs);
25297+ return regs && user_mode(regs);
25298 }
25299
25300 /*
25301diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25302index 2e977b5..5f2c273 100644
25303--- a/arch/x86/kernel/i8259.c
25304+++ b/arch/x86/kernel/i8259.c
25305@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25306 static void make_8259A_irq(unsigned int irq)
25307 {
25308 disable_irq_nosync(irq);
25309- io_apic_irqs &= ~(1<<irq);
25310+ io_apic_irqs &= ~(1UL<<irq);
25311 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25312 i8259A_chip.name);
25313 enable_irq(irq);
25314@@ -209,7 +209,7 @@ spurious_8259A_irq:
25315 "spurious 8259A interrupt: IRQ%d.\n", irq);
25316 spurious_irq_mask |= irqmask;
25317 }
25318- atomic_inc(&irq_err_count);
25319+ atomic_inc_unchecked(&irq_err_count);
25320 /*
25321 * Theoretically we do not have to handle this IRQ,
25322 * but in Linux this does not cause problems and is
25323@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
25324 /* (slave's support for AEOI in flat mode is to be investigated) */
25325 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25326
25327+ pax_open_kernel();
25328 if (auto_eoi)
25329 /*
25330 * In AEOI mode we just have to mask the interrupt
25331 * when acking.
25332 */
25333- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25334+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25335 else
25336- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25337+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25338+ pax_close_kernel();
25339
25340 udelay(100); /* wait for 8259A to initialize */
25341
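Annotation: the i8259.c hunk shows the recurring pattern for writing to structures that KERNEXEC keeps read-only: bracket the store with pax_open_kernel()/pax_close_kernel() and cast the field to force the write, as in *(void **)&i8259A_chip.irq_mask_ack = ... . A userspace analogue using mprotect(2), purely to illustrate the open-write-close shape:

    #include <sys/mman.h>
    #include <stddef.h>

    /* Analogue only: the real pax_open_kernel()/pax_close_kernel() toggle
     * CR0 write protection or the KERNEXEC mapping, not page protections.
     * 'page' must be page-aligned for mprotect(). */
    static void write_readonly_slot(void *page, size_t len, void **slot, void *val)
    {
        mprotect(page, len, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel() */
        *slot = val;
        mprotect(page, len, PROT_READ);               /* ~ pax_close_kernel() */
    }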
25342diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25343index a979b5b..1d6db75 100644
25344--- a/arch/x86/kernel/io_delay.c
25345+++ b/arch/x86/kernel/io_delay.c
25346@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25347 * Quirk table for systems that misbehave (lock up, etc.) if port
25348 * 0x80 is used:
25349 */
25350-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25351+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25352 {
25353 .callback = dmi_io_delay_0xed_port,
25354 .ident = "Compaq Presario V6000",
25355diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25356index 4ddaf66..49d5c18 100644
25357--- a/arch/x86/kernel/ioport.c
25358+++ b/arch/x86/kernel/ioport.c
25359@@ -6,6 +6,7 @@
25360 #include <linux/sched.h>
25361 #include <linux/kernel.h>
25362 #include <linux/capability.h>
25363+#include <linux/security.h>
25364 #include <linux/errno.h>
25365 #include <linux/types.h>
25366 #include <linux/ioport.h>
25367@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25368 return -EINVAL;
25369 if (turn_on && !capable(CAP_SYS_RAWIO))
25370 return -EPERM;
25371+#ifdef CONFIG_GRKERNSEC_IO
25372+ if (turn_on && grsec_disable_privio) {
25373+ gr_handle_ioperm();
25374+ return -ENODEV;
25375+ }
25376+#endif
25377
25378 /*
25379 * If it's the first ioperm() call in this thread's lifetime, set the
25380@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25381 * because the ->io_bitmap_max value must match the bitmap
25382 * contents:
25383 */
25384- tss = &per_cpu(init_tss, get_cpu());
25385+ tss = init_tss + get_cpu();
25386
25387 if (turn_on)
25388 bitmap_clear(t->io_bitmap_ptr, from, num);
25389@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25390 if (level > old) {
25391 if (!capable(CAP_SYS_RAWIO))
25392 return -EPERM;
25393+#ifdef CONFIG_GRKERNSEC_IO
25394+ if (grsec_disable_privio) {
25395+ gr_handle_iopl();
25396+ return -ENODEV;
25397+ }
25398+#endif
25399 }
25400 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25401 t->iopl = level << 12;
25402diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25403index d99f31d..1c0f466 100644
25404--- a/arch/x86/kernel/irq.c
25405+++ b/arch/x86/kernel/irq.c
25406@@ -21,7 +21,7 @@
25407 #define CREATE_TRACE_POINTS
25408 #include <asm/trace/irq_vectors.h>
25409
25410-atomic_t irq_err_count;
25411+atomic_unchecked_t irq_err_count;
25412
25413 /* Function pointer for generic interrupt vector handling */
25414 void (*x86_platform_ipi_callback)(void) = NULL;
25415@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25416 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
25417 seq_printf(p, " Machine check polls\n");
25418 #endif
25419- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25420+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25421 #if defined(CONFIG_X86_IO_APIC)
25422- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25423+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25424 #endif
25425 return 0;
25426 }
25427@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25428
25429 u64 arch_irq_stat(void)
25430 {
25431- u64 sum = atomic_read(&irq_err_count);
25432+ u64 sum = atomic_read_unchecked(&irq_err_count);
25433 return sum;
25434 }
25435
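
irq_err_count is a pure statistic, which is why the patch converts it to PaX's atomic_unchecked_t: under the REFCOUNT hardening, ordinary atomic_t increments trap on overflow to stop reference-count-overflow exploits, and counters whose wraparound is harmless opt out through the _unchecked variants. A hedged userspace analogue of the split (the real kernel types use inline asm and a hardware trap, not this check):

    /* Userspace analogue of atomic_t vs atomic_unchecked_t under REFCOUNT. */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;            /* overflow-checked */
    typedef struct { int counter; } atomic_unchecked_t;  /* free to wrap */

    static void atomic_inc(atomic_t *v)
    {
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
        if (old == INT_MAX)
            abort();          /* REFCOUNT would raise a trap here */
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        atomic_unchecked_t errs = { INT_MAX };
        atomic_inc_unchecked(&errs);      /* statistic: wrap is harmless */
        printf("irq_err_count wrapped to %d\n", errs.counter);
        return 0;
    }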
25436diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25437index d7fcbed..96e715a 100644
25438--- a/arch/x86/kernel/irq_32.c
25439+++ b/arch/x86/kernel/irq_32.c
25440@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25441
25442 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25443
25444+extern void gr_handle_kernel_exploit(void);
25445+
25446 int sysctl_panic_on_stackoverflow __read_mostly;
25447
25448 /* Debugging check for stack overflow: is there less than 1KB free? */
25449@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25450 __asm__ __volatile__("andl %%esp,%0" :
25451 "=r" (sp) : "0" (THREAD_SIZE - 1));
25452
25453- return sp < (sizeof(struct thread_info) + STACK_WARN);
25454+ return sp < STACK_WARN;
25455 }
25456
25457 static void print_stack_overflow(void)
25458 {
25459 printk(KERN_WARNING "low stack detected by irq handler\n");
25460 dump_stack();
25461+ gr_handle_kernel_exploit();
25462 if (sysctl_panic_on_stackoverflow)
25463 panic("low stack detected by irq handler - check messages\n");
25464 }
25465@@ -59,8 +62,8 @@ static inline void print_stack_overflow(void) { }
25466 * per-CPU IRQ handling contexts (thread information and stack)
25467 */
25468 union irq_ctx {
25469- struct thread_info tinfo;
25470- u32 stack[THREAD_SIZE/sizeof(u32)];
25471+ unsigned long previous_esp;
25472+ u32 stack[THREAD_SIZE/sizeof(u32)];
25473 } __attribute__((aligned(THREAD_SIZE)));
25474
25475 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
25476@@ -80,10 +83,9 @@ static void call_on_stack(void *func, void *stack)
25477 static inline int
25478 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25479 {
25480- union irq_ctx *curctx, *irqctx;
25481+ union irq_ctx *irqctx;
25482 u32 *isp, arg1, arg2;
25483
25484- curctx = (union irq_ctx *) current_thread_info();
25485 irqctx = __this_cpu_read(hardirq_ctx);
25486
25487 /*
25488@@ -92,13 +94,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25489 * handler) we can't do that and just have to keep using the
25490 * current stack (which is the irq stack already after all)
25491 */
25492- if (unlikely(curctx == irqctx))
25493+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
25494 return 0;
25495
25496 /* build the stack frame on the IRQ stack */
25497- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
25498- irqctx->tinfo.task = curctx->tinfo.task;
25499- irqctx->tinfo.previous_esp = current_stack_pointer;
25500+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
25501+ irqctx->previous_esp = current_stack_pointer;
25502+
25503+#ifdef CONFIG_PAX_MEMORY_UDEREF
25504+ __set_fs(MAKE_MM_SEG(0));
25505+#endif
25506
25507 if (unlikely(overflow))
25508 call_on_stack(print_stack_overflow, isp);
25509@@ -110,6 +115,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25510 : "0" (irq), "1" (desc), "2" (isp),
25511 "D" (desc->handle_irq)
25512 : "memory", "cc", "ecx");
25513+
25514+#ifdef CONFIG_PAX_MEMORY_UDEREF
25515+ __set_fs(current_thread_info()->addr_limit);
25516+#endif
25517+
25518 return 1;
25519 }
25520
25521@@ -118,48 +128,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25522 */
25523 void irq_ctx_init(int cpu)
25524 {
25525- union irq_ctx *irqctx;
25526-
25527 if (per_cpu(hardirq_ctx, cpu))
25528 return;
25529
25530- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
25531- THREADINFO_GFP,
25532- THREAD_SIZE_ORDER));
25533- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
25534- irqctx->tinfo.cpu = cpu;
25535- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
25536-
25537- per_cpu(hardirq_ctx, cpu) = irqctx;
25538-
25539- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
25540- THREADINFO_GFP,
25541- THREAD_SIZE_ORDER));
25542- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
25543- irqctx->tinfo.cpu = cpu;
25544- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
25545-
25546- per_cpu(softirq_ctx, cpu) = irqctx;
25547-
25548- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25549- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
25550+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25551+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25552 }
25553
25554 void do_softirq_own_stack(void)
25555 {
25556- struct thread_info *curctx;
25557 union irq_ctx *irqctx;
25558 u32 *isp;
25559
25560- curctx = current_thread_info();
25561 irqctx = __this_cpu_read(softirq_ctx);
25562- irqctx->tinfo.task = curctx->task;
25563- irqctx->tinfo.previous_esp = current_stack_pointer;
25564+ irqctx->previous_esp = current_stack_pointer;
25565
25566 /* build the stack frame on the softirq stack */
25567- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
25568+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
25569+
25570+#ifdef CONFIG_PAX_MEMORY_UDEREF
25571+ __set_fs(MAKE_MM_SEG(0));
25572+#endif
25573
25574 call_on_stack(__do_softirq, isp);
25575+
25576+#ifdef CONFIG_PAX_MEMORY_UDEREF
25577+ __set_fs(current_thread_info()->addr_limit);
25578+#endif
25579+
25580 }
25581
25582 bool handle_irq(unsigned irq, struct pt_regs *regs)
25583@@ -173,7 +169,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25584 if (unlikely(!desc))
25585 return false;
25586
25587- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25588+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25589 if (unlikely(overflow))
25590 print_stack_overflow();
25591 desc->handle_irq(irq, desc);
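
With thread_info removed from the IRQ stack, execute_on_irq_stack() above can no longer compare context pointers; instead it tests whether the current stack pointer already lies inside the IRQ stack by unsigned distance. A sketch of that test with made-up values and an assumed THREAD_SIZE:

    /* One unsigned subtraction covers both bounds. */
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE 8192UL

    static int on_stack(uintptr_t sp, uintptr_t base)
    {
        /* true iff base <= sp < base + THREAD_SIZE; if sp < base the
         * subtraction wraps to a huge value and the test fails too */
        return sp - base < THREAD_SIZE;
    }

    int main(void)
    {
        uintptr_t base = 0x100000UL;
        printf("%d %d %d\n",
               on_stack(base + 64, base),
               on_stack(base - 64, base),
               on_stack(base + THREAD_SIZE, base));   /* prints: 1 0 0 */
        return 0;
    }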
25592diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25593index 4d1c746..55a22d6 100644
25594--- a/arch/x86/kernel/irq_64.c
25595+++ b/arch/x86/kernel/irq_64.c
25596@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25597 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25598 EXPORT_PER_CPU_SYMBOL(irq_regs);
25599
25600+extern void gr_handle_kernel_exploit(void);
25601+
25602 int sysctl_panic_on_stackoverflow;
25603
25604 /*
25605@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25606 u64 estack_top, estack_bottom;
25607 u64 curbase = (u64)task_stack_page(current);
25608
25609- if (user_mode_vm(regs))
25610+ if (user_mode(regs))
25611 return;
25612
25613 if (regs->sp >= curbase + sizeof(struct thread_info) +
25614@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25615 irq_stack_top, irq_stack_bottom,
25616 estack_top, estack_bottom);
25617
25618+ gr_handle_kernel_exploit();
25619+
25620 if (sysctl_panic_on_stackoverflow)
25621 panic("low stack detected by irq handler - check messages\n");
25622 #endif
25623diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25624index 26d5a55..a01160a 100644
25625--- a/arch/x86/kernel/jump_label.c
25626+++ b/arch/x86/kernel/jump_label.c
25627@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25628 * Jump label is enabled for the first time.
25629 * So we expect a default_nop...
25630 */
25631- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25632+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25633 != 0))
25634 bug_at((void *)entry->code, __LINE__);
25635 } else {
25636@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25637 * ...otherwise expect an ideal_nop. Otherwise
25638 * something went horribly wrong.
25639 */
25640- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25641+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25642 != 0))
25643 bug_at((void *)entry->code, __LINE__);
25644 }
25645@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25646 * are converting the default nop to the ideal nop.
25647 */
25648 if (init) {
25649- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25650+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25651 bug_at((void *)entry->code, __LINE__);
25652 } else {
25653 code.jump = 0xe9;
25654 code.offset = entry->target -
25655 (entry->code + JUMP_LABEL_NOP_SIZE);
25656- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25657+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25658 bug_at((void *)entry->code, __LINE__);
25659 }
25660 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
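
The ktla_ktva() calls threaded through this file (and through kgdb and kprobes below) come from PaX KERNEXEC on i386: kernel text executes at a different linear address than the alias it is read and patched through, so anything inspecting or rewriting code bytes translates the address first. A sketch of the inverse pair; the constant here is illustrative, the real delta derives from the kernel's load address:

    /* Sketch only: KERNEXEC's real offset is not this constant. */
    #include <assert.h>
    #include <stdint.h>

    #define KERNEXEC_DELTA 0x01000000UL   /* made-up offset */

    static uintptr_t ktla_ktva(uintptr_t a) { return a + KERNEXEC_DELTA; }
    static uintptr_t ktva_ktla(uintptr_t a) { return a - KERNEXEC_DELTA; }

    int main(void)
    {
        uintptr_t code = 0xc1000000UL;               /* made-up text address */
        assert(ktva_ktla(ktla_ktva(code)) == code);  /* exact inverses */
        return 0;
    }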
25661diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25662index 7ec1d5f..5a7d130 100644
25663--- a/arch/x86/kernel/kgdb.c
25664+++ b/arch/x86/kernel/kgdb.c
25665@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25666 #ifdef CONFIG_X86_32
25667 switch (regno) {
25668 case GDB_SS:
25669- if (!user_mode_vm(regs))
25670+ if (!user_mode(regs))
25671 *(unsigned long *)mem = __KERNEL_DS;
25672 break;
25673 case GDB_SP:
25674- if (!user_mode_vm(regs))
25675+ if (!user_mode(regs))
25676 *(unsigned long *)mem = kernel_stack_pointer(regs);
25677 break;
25678 case GDB_GS:
25679@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25680 bp->attr.bp_addr = breakinfo[breakno].addr;
25681 bp->attr.bp_len = breakinfo[breakno].len;
25682 bp->attr.bp_type = breakinfo[breakno].type;
25683- info->address = breakinfo[breakno].addr;
25684+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25685+ info->address = ktla_ktva(breakinfo[breakno].addr);
25686+ else
25687+ info->address = breakinfo[breakno].addr;
25688 info->len = breakinfo[breakno].len;
25689 info->type = breakinfo[breakno].type;
25690 val = arch_install_hw_breakpoint(bp);
25691@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25692 case 'k':
25693 /* clear the trace bit */
25694 linux_regs->flags &= ~X86_EFLAGS_TF;
25695- atomic_set(&kgdb_cpu_doing_single_step, -1);
25696+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25697
25698 /* set the trace bit if we're stepping */
25699 if (remcomInBuffer[0] == 's') {
25700 linux_regs->flags |= X86_EFLAGS_TF;
25701- atomic_set(&kgdb_cpu_doing_single_step,
25702+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25703 raw_smp_processor_id());
25704 }
25705
25706@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25707
25708 switch (cmd) {
25709 case DIE_DEBUG:
25710- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25711+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25712 if (user_mode(regs))
25713 return single_step_cont(regs, args);
25714 break;
25715@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25716 #endif /* CONFIG_DEBUG_RODATA */
25717
25718 bpt->type = BP_BREAKPOINT;
25719- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25720+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25721 BREAK_INSTR_SIZE);
25722 if (err)
25723 return err;
25724- err = probe_kernel_write((char *)bpt->bpt_addr,
25725+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25726 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25727 #ifdef CONFIG_DEBUG_RODATA
25728 if (!err)
25729@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25730 return -EBUSY;
25731 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25732 BREAK_INSTR_SIZE);
25733- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25734+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25735 if (err)
25736 return err;
25737 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25738@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25739 if (mutex_is_locked(&text_mutex))
25740 goto knl_write;
25741 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25742- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25743+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25744 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25745 goto knl_write;
25746 return err;
25747 knl_write:
25748 #endif /* CONFIG_DEBUG_RODATA */
25749- return probe_kernel_write((char *)bpt->bpt_addr,
25750+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25751 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25752 }
25753
25754diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25755index 79a3f96..6ba030a 100644
25756--- a/arch/x86/kernel/kprobes/core.c
25757+++ b/arch/x86/kernel/kprobes/core.c
25758@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
25759 s32 raddr;
25760 } __packed *insn;
25761
25762- insn = (struct __arch_relative_insn *)from;
25763+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25764+
25765+ pax_open_kernel();
25766 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25767 insn->op = op;
25768+ pax_close_kernel();
25769 }
25770
25771 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25772@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
25773 kprobe_opcode_t opcode;
25774 kprobe_opcode_t *orig_opcodes = opcodes;
25775
25776- if (search_exception_tables((unsigned long)opcodes))
25777+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25778 return 0; /* Page fault may occur on this address. */
25779
25780 retry:
25781@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25782 * for the first byte, we can recover the original instruction
25783 * from it and kp->opcode.
25784 */
25785- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25786+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25787 buf[0] = kp->opcode;
25788- return (unsigned long)buf;
25789+ return ktva_ktla((unsigned long)buf);
25790 }
25791
25792 /*
25793@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25794 /* Another subsystem puts a breakpoint, failed to recover */
25795 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25796 return 0;
25797+ pax_open_kernel();
25798 memcpy(dest, insn.kaddr, insn.length);
25799+ pax_close_kernel();
25800
25801 #ifdef CONFIG_X86_64
25802 if (insn_rip_relative(&insn)) {
25803@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25804 return 0;
25805 }
25806 disp = (u8 *) dest + insn_offset_displacement(&insn);
25807+ pax_open_kernel();
25808 *(s32 *) disp = (s32) newdisp;
25809+ pax_close_kernel();
25810 }
25811 #endif
25812 return insn.length;
25813@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25814 * nor set current_kprobe, because it doesn't use single
25815 * stepping.
25816 */
25817- regs->ip = (unsigned long)p->ainsn.insn;
25818+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25819 preempt_enable_no_resched();
25820 return;
25821 }
25822@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25823 regs->flags &= ~X86_EFLAGS_IF;
25824 /* single step inline if the instruction is an int3 */
25825 if (p->opcode == BREAKPOINT_INSTRUCTION)
25826- regs->ip = (unsigned long)p->addr;
25827+ regs->ip = ktla_ktva((unsigned long)p->addr);
25828 else
25829- regs->ip = (unsigned long)p->ainsn.insn;
25830+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25831 }
25832
25833 /*
25834@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
25835 setup_singlestep(p, regs, kcb, 0);
25836 return 1;
25837 }
25838- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25839+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25840 /*
25841 * The breakpoint instruction was removed right
25842 * after we hit it. Another cpu has removed
25843@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
25844 " movq %rax, 152(%rsp)\n"
25845 RESTORE_REGS_STRING
25846 " popfq\n"
25847+#ifdef KERNEXEC_PLUGIN
25848+ " btsq $63,(%rsp)\n"
25849+#endif
25850 #else
25851 " pushf\n"
25852 SAVE_REGS_STRING
25853@@ -779,7 +789,7 @@ static void __kprobes
25854 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
25855 {
25856 unsigned long *tos = stack_addr(regs);
25857- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25858+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25859 unsigned long orig_ip = (unsigned long)p->addr;
25860 kprobe_opcode_t *insn = p->ainsn.insn;
25861
25862@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
25863 struct die_args *args = data;
25864 int ret = NOTIFY_DONE;
25865
25866- if (args->regs && user_mode_vm(args->regs))
25867+ if (args->regs && user_mode(args->regs))
25868 return ret;
25869
25870 switch (val) {
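
__synthesize_relative_insn() and __copy_instruction() above bracket their instruction writes with pax_open_kernel()/pax_close_kernel(): under KERNEXEC kernel text is read-only even to the kernel itself, so patching happens through a briefly opened write window that is sealed again immediately. A userspace analogue of the bracket using mprotect (helper names are mine; the kernel primitive instead toggles CR0.WP or switches mappings — see the runnable module_alloc sketch further down for full wiring):

    /* Open/patch/close write-window pattern (analogue, compiles as-is). */
    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    #define PAGE 4096UL
    static void *page_of(void *p) { return (void *)((uintptr_t)p & ~(PAGE - 1)); }

    static void open_window(void *p)  { mprotect(page_of(p), PAGE, PROT_READ | PROT_WRITE); }
    static void close_window(void *p) { mprotect(page_of(p), PAGE, PROT_READ | PROT_EXEC); }

    void patch_insn(unsigned char *addr, const unsigned char *insn, size_t len)
    {
        open_window(addr);
        memcpy(addr, insn, len);   /* the only writable moment */
        close_window(addr);
    }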
25871diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25872index 898160b..758cde8 100644
25873--- a/arch/x86/kernel/kprobes/opt.c
25874+++ b/arch/x86/kernel/kprobes/opt.c
25875@@ -79,6 +79,7 @@ found:
25876 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25877 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25878 {
25879+ pax_open_kernel();
25880 #ifdef CONFIG_X86_64
25881 *addr++ = 0x48;
25882 *addr++ = 0xbf;
25883@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
25884 *addr++ = 0xb8;
25885 #endif
25886 *(unsigned long *)addr = val;
25887+ pax_close_kernel();
25888 }
25889
25890 asm (
25891@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25892 * Verify if the address gap is in 2GB range, because this uses
25893 * a relative jump.
25894 */
25895- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25896+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25897 if (abs(rel) > 0x7fffffff)
25898 return -ERANGE;
25899
25900@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25901 op->optinsn.size = ret;
25902
25903 /* Copy arch-dep-instance from template */
25904- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25905+ pax_open_kernel();
25906+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25907+ pax_close_kernel();
25908
25909 /* Set probe information */
25910 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25911
25912 /* Set probe function call */
25913- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25914+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25915
25916 /* Set returning jmp instruction at the tail of out-of-line buffer */
25917- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25918+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25919 (u8 *)op->kp.addr + op->optinsn.size);
25920
25921 flush_icache_range((unsigned long) buf,
25922@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
25923 WARN_ON(kprobe_disabled(&op->kp));
25924
25925 /* Backup instructions which will be replaced by jump address */
25926- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25927+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25928 RELATIVE_ADDR_SIZE);
25929
25930 insn_buf[0] = RELATIVEJUMP_OPCODE;
25931@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25932 /* This kprobe is really able to run optimized path. */
25933 op = container_of(p, struct optimized_kprobe, kp);
25934 /* Detour through copied instructions */
25935- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25936+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25937 if (!reenter)
25938 reset_current_kprobe();
25939 preempt_enable_no_resched();
25940diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25941index c2bedae..25e7ab6 100644
25942--- a/arch/x86/kernel/ksysfs.c
25943+++ b/arch/x86/kernel/ksysfs.c
25944@@ -184,7 +184,7 @@ out:
25945
25946 static struct kobj_attribute type_attr = __ATTR_RO(type);
25947
25948-static struct bin_attribute data_attr = {
25949+static bin_attribute_no_const data_attr __read_only = {
25950 .attr = {
25951 .name = "data",
25952 .mode = S_IRUGO,
25953diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25954index dcbbaa1..81ae763 100644
25955--- a/arch/x86/kernel/ldt.c
25956+++ b/arch/x86/kernel/ldt.c
25957@@ -68,13 +68,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25958 if (reload) {
25959 #ifdef CONFIG_SMP
25960 preempt_disable();
25961- load_LDT(pc);
25962+ load_LDT_nolock(pc);
25963 if (!cpumask_equal(mm_cpumask(current->mm),
25964 cpumask_of(smp_processor_id())))
25965 smp_call_function(flush_ldt, current->mm, 1);
25966 preempt_enable();
25967 #else
25968- load_LDT(pc);
25969+ load_LDT_nolock(pc);
25970 #endif
25971 }
25972 if (oldsize) {
25973@@ -96,7 +96,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25974 return err;
25975
25976 for (i = 0; i < old->size; i++)
25977- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25978+ write_ldt_entry(new->ldt, i, old->ldt + i);
25979 return 0;
25980 }
25981
25982@@ -117,6 +117,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25983 retval = copy_ldt(&mm->context, &old_mm->context);
25984 mutex_unlock(&old_mm->context.lock);
25985 }
25986+
25987+ if (tsk == current) {
25988+ mm->context.vdso = 0;
25989+
25990+#ifdef CONFIG_X86_32
25991+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25992+ mm->context.user_cs_base = 0UL;
25993+ mm->context.user_cs_limit = ~0UL;
25994+
25995+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25996+ cpus_clear(mm->context.cpu_user_cs_mask);
25997+#endif
25998+
25999+#endif
26000+#endif
26001+
26002+ }
26003+
26004 return retval;
26005 }
26006
26007@@ -231,6 +249,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26008 }
26009 }
26010
26011+#ifdef CONFIG_PAX_SEGMEXEC
26012+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26013+ error = -EINVAL;
26014+ goto out_unlock;
26015+ }
26016+#endif
26017+
26018 /*
26019 * On x86-64 we do not support 16-bit segments due to
26020 * IRET leaking the high bits of the kernel stack address.
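
The new CONFIG_PAX_SEGMEXEC block rejects modify_ldt() requests that would install a code segment: SEGMEXEC implements NX on i386 by splitting the address space with its own code-segment layout and cannot tolerate user-defined ones. The check reduces to one bit of the descriptor's contents field, as in this sketch (struct user_desc and the MODIFY_LDT_CONTENTS_* constants come from uapi asm/ldt.h):

    /* Sketch of the contents-field test added above. */
    #include <asm/ldt.h>    /* struct user_desc, MODIFY_LDT_CONTENTS_CODE */
    #include <errno.h>

    static int segmexec_ldt_check(const struct user_desc *ldt_info, int segmexec)
    {
        if (segmexec && (ldt_info->contents & MODIFY_LDT_CONTENTS_CODE))
            return -EINVAL;    /* no user-installed code segments */
        return 0;
    }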
26021diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26022index 1667b1d..16492c5 100644
26023--- a/arch/x86/kernel/machine_kexec_32.c
26024+++ b/arch/x86/kernel/machine_kexec_32.c
26025@@ -25,7 +25,7 @@
26026 #include <asm/cacheflush.h>
26027 #include <asm/debugreg.h>
26028
26029-static void set_idt(void *newidt, __u16 limit)
26030+static void set_idt(struct desc_struct *newidt, __u16 limit)
26031 {
26032 struct desc_ptr curidt;
26033
26034@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26035 }
26036
26037
26038-static void set_gdt(void *newgdt, __u16 limit)
26039+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26040 {
26041 struct desc_ptr curgdt;
26042
26043@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26044 }
26045
26046 control_page = page_address(image->control_code_page);
26047- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26048+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26049
26050 relocate_kernel_ptr = control_page;
26051 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26052diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26053index 18be189..4a9fe40 100644
26054--- a/arch/x86/kernel/module.c
26055+++ b/arch/x86/kernel/module.c
26056@@ -43,15 +43,60 @@ do { \
26057 } while (0)
26058 #endif
26059
26060-void *module_alloc(unsigned long size)
26061+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26062 {
26063- if (PAGE_ALIGN(size) > MODULES_LEN)
26064+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26065 return NULL;
26066 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
26067- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
26068+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
26069 NUMA_NO_NODE, __builtin_return_address(0));
26070 }
26071
26072+void *module_alloc(unsigned long size)
26073+{
26074+
26075+#ifdef CONFIG_PAX_KERNEXEC
26076+ return __module_alloc(size, PAGE_KERNEL);
26077+#else
26078+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26079+#endif
26080+
26081+}
26082+
26083+#ifdef CONFIG_PAX_KERNEXEC
26084+#ifdef CONFIG_X86_32
26085+void *module_alloc_exec(unsigned long size)
26086+{
26087+ struct vm_struct *area;
26088+
26089+ if (size == 0)
26090+ return NULL;
26091+
26092+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26093+ return area ? area->addr : NULL;
26094+}
26095+EXPORT_SYMBOL(module_alloc_exec);
26096+
26097+void module_free_exec(struct module *mod, void *module_region)
26098+{
26099+ vunmap(module_region);
26100+}
26101+EXPORT_SYMBOL(module_free_exec);
26102+#else
26103+void module_free_exec(struct module *mod, void *module_region)
26104+{
26105+ module_free(mod, module_region);
26106+}
26107+EXPORT_SYMBOL(module_free_exec);
26108+
26109+void *module_alloc_exec(unsigned long size)
26110+{
26111+ return __module_alloc(size, PAGE_KERNEL_RX);
26112+}
26113+EXPORT_SYMBOL(module_alloc_exec);
26114+#endif
26115+#endif
26116+
26117 #ifdef CONFIG_X86_32
26118 int apply_relocate(Elf32_Shdr *sechdrs,
26119 const char *strtab,
26120@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26121 unsigned int i;
26122 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26123 Elf32_Sym *sym;
26124- uint32_t *location;
26125+ uint32_t *plocation, location;
26126
26127 DEBUGP("Applying relocate section %u to %u\n",
26128 relsec, sechdrs[relsec].sh_info);
26129 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26130 /* This is where to make the change */
26131- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26132- + rel[i].r_offset;
26133+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26134+ location = (uint32_t)plocation;
26135+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26136+ plocation = ktla_ktva((void *)plocation);
26137 /* This is the symbol it is referring to. Note that all
26138 undefined symbols have been resolved. */
26139 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26140@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26141 switch (ELF32_R_TYPE(rel[i].r_info)) {
26142 case R_386_32:
26143 /* We add the value into the location given */
26144- *location += sym->st_value;
26145+ pax_open_kernel();
26146+ *plocation += sym->st_value;
26147+ pax_close_kernel();
26148 break;
26149 case R_386_PC32:
26150 /* Add the value, subtract its position */
26151- *location += sym->st_value - (uint32_t)location;
26152+ pax_open_kernel();
26153+ *plocation += sym->st_value - location;
26154+ pax_close_kernel();
26155 break;
26156 default:
26157 pr_err("%s: Unknown relocation: %u\n",
26158@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26159 case R_X86_64_NONE:
26160 break;
26161 case R_X86_64_64:
26162+ pax_open_kernel();
26163 *(u64 *)loc = val;
26164+ pax_close_kernel();
26165 break;
26166 case R_X86_64_32:
26167+ pax_open_kernel();
26168 *(u32 *)loc = val;
26169+ pax_close_kernel();
26170 if (val != *(u32 *)loc)
26171 goto overflow;
26172 break;
26173 case R_X86_64_32S:
26174+ pax_open_kernel();
26175 *(s32 *)loc = val;
26176+ pax_close_kernel();
26177 if ((s64)val != *(s32 *)loc)
26178 goto overflow;
26179 break;
26180 case R_X86_64_PC32:
26181 val -= (u64)loc;
26182+ pax_open_kernel();
26183 *(u32 *)loc = val;
26184+ pax_close_kernel();
26185+
26186 #if 0
26187 if ((s64)val != *(s32 *)loc)
26188 goto overflow;
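
Under KERNEXEC the hunks above split module memory into a non-executable module_alloc() (PAGE_KERNEL) and a separate module_alloc_exec() handing out read+execute mappings, so no module page is ever writable and executable at the same time. The same W^X discipline as a runnable userspace sketch (x86-64 Linux assumed): write the code while the page is RW, flip it to RX, then call it.

    /* W^X sketch: never PROT_WRITE|PROT_EXEC simultaneously. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        static const unsigned char code[] =
            { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };  /* mov eax,42; ret */

        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        memcpy(buf, code, sizeof(code));             /* write while RW */
        if (mprotect(buf, 4096, PROT_READ | PROT_EXEC))
            return 1;                                /* execute only after */

        int (*fn)(void) = (int (*)(void))buf;
        printf("%d\n", fn());                        /* prints 42 */
        return 0;
    }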
26189diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26190index 05266b5..3432443 100644
26191--- a/arch/x86/kernel/msr.c
26192+++ b/arch/x86/kernel/msr.c
26193@@ -37,6 +37,7 @@
26194 #include <linux/notifier.h>
26195 #include <linux/uaccess.h>
26196 #include <linux/gfp.h>
26197+#include <linux/grsecurity.h>
26198
26199 #include <asm/processor.h>
26200 #include <asm/msr.h>
26201@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26202 int err = 0;
26203 ssize_t bytes = 0;
26204
26205+#ifdef CONFIG_GRKERNSEC_KMEM
26206+ gr_handle_msr_write();
26207+ return -EPERM;
26208+#endif
26209+
26210 if (count % 8)
26211 return -EINVAL; /* Invalid chunk size */
26212
26213@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26214 err = -EBADF;
26215 break;
26216 }
26217+#ifdef CONFIG_GRKERNSEC_KMEM
26218+ gr_handle_msr_write();
26219+ return -EPERM;
26220+#endif
26221 if (copy_from_user(&regs, uregs, sizeof regs)) {
26222 err = -EFAULT;
26223 break;
26224@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26225 return notifier_from_errno(err);
26226 }
26227
26228-static struct notifier_block __refdata msr_class_cpu_notifier = {
26229+static struct notifier_block msr_class_cpu_notifier = {
26230 .notifier_call = msr_class_cpu_callback,
26231 };
26232
26233diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26234index 6fcb49c..5b3f4ff 100644
26235--- a/arch/x86/kernel/nmi.c
26236+++ b/arch/x86/kernel/nmi.c
26237@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
26238 return handled;
26239 }
26240
26241-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26242+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26243 {
26244 struct nmi_desc *desc = nmi_to_desc(type);
26245 unsigned long flags;
26246@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26247 * event confuses some handlers (kdump uses this flag)
26248 */
26249 if (action->flags & NMI_FLAG_FIRST)
26250- list_add_rcu(&action->list, &desc->head);
26251+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26252 else
26253- list_add_tail_rcu(&action->list, &desc->head);
26254+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26255
26256 spin_unlock_irqrestore(&desc->lock, flags);
26257 return 0;
26258@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26259 if (!strcmp(n->name, name)) {
26260 WARN(in_nmi(),
26261 "Trying to free NMI (%s) from NMI context!\n", n->name);
26262- list_del_rcu(&n->list);
26263+ pax_list_del_rcu((struct list_head *)&n->list);
26264 break;
26265 }
26266 }
26267@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
26268 dotraplinkage notrace __kprobes void
26269 do_nmi(struct pt_regs *regs, long error_code)
26270 {
26271+
26272+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26273+ if (!user_mode(regs)) {
26274+ unsigned long cs = regs->cs & 0xFFFF;
26275+ unsigned long ip = ktva_ktla(regs->ip);
26276+
26277+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26278+ regs->ip = ip;
26279+ }
26280+#endif
26281+
26282 nmi_nesting_preprocess(regs);
26283
26284 nmi_enter();
26285diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26286index 6d9582e..f746287 100644
26287--- a/arch/x86/kernel/nmi_selftest.c
26288+++ b/arch/x86/kernel/nmi_selftest.c
26289@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26290 {
26291 /* trap all the unknown NMIs we may generate */
26292 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26293- __initdata);
26294+ __initconst);
26295 }
26296
26297 static void __init cleanup_nmi_testsuite(void)
26298@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26299 unsigned long timeout;
26300
26301 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26302- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26303+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26304 nmi_fail = FAILURE;
26305 return;
26306 }
26307diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26308index bbb6c73..24a58ef 100644
26309--- a/arch/x86/kernel/paravirt-spinlocks.c
26310+++ b/arch/x86/kernel/paravirt-spinlocks.c
26311@@ -8,7 +8,7 @@
26312
26313 #include <asm/paravirt.h>
26314
26315-struct pv_lock_ops pv_lock_ops = {
26316+struct pv_lock_ops pv_lock_ops __read_only = {
26317 #ifdef CONFIG_SMP
26318 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26319 .unlock_kick = paravirt_nop,
26320diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26321index 1b10af8..45bfbec 100644
26322--- a/arch/x86/kernel/paravirt.c
26323+++ b/arch/x86/kernel/paravirt.c
26324@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
26325 {
26326 return x;
26327 }
26328+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26329+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26330+#endif
26331
26332 void __init default_banner(void)
26333 {
26334@@ -141,16 +144,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26335
26336 if (opfunc == NULL)
26337 /* If there's no function, patch it with a ud2a (BUG) */
26338- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26339- else if (opfunc == _paravirt_nop)
26340+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26341+ else if (opfunc == (void *)_paravirt_nop)
26342 /* If the operation is a nop, then nop the callsite */
26343 ret = paravirt_patch_nop();
26344
26345 /* identity functions just return their single argument */
26346- else if (opfunc == _paravirt_ident_32)
26347+ else if (opfunc == (void *)_paravirt_ident_32)
26348 ret = paravirt_patch_ident_32(insnbuf, len);
26349- else if (opfunc == _paravirt_ident_64)
26350+ else if (opfunc == (void *)_paravirt_ident_64)
26351 ret = paravirt_patch_ident_64(insnbuf, len);
26352+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26353+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26354+ ret = paravirt_patch_ident_64(insnbuf, len);
26355+#endif
26356
26357 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26358 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26359@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26360 if (insn_len > len || start == NULL)
26361 insn_len = len;
26362 else
26363- memcpy(insnbuf, start, insn_len);
26364+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26365
26366 return insn_len;
26367 }
26368@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26369 return this_cpu_read(paravirt_lazy_mode);
26370 }
26371
26372-struct pv_info pv_info = {
26373+struct pv_info pv_info __read_only = {
26374 .name = "bare hardware",
26375 .paravirt_enabled = 0,
26376 .kernel_rpl = 0,
26377@@ -310,16 +317,16 @@ struct pv_info pv_info = {
26378 #endif
26379 };
26380
26381-struct pv_init_ops pv_init_ops = {
26382+struct pv_init_ops pv_init_ops __read_only = {
26383 .patch = native_patch,
26384 };
26385
26386-struct pv_time_ops pv_time_ops = {
26387+struct pv_time_ops pv_time_ops __read_only = {
26388 .sched_clock = native_sched_clock,
26389 .steal_clock = native_steal_clock,
26390 };
26391
26392-__visible struct pv_irq_ops pv_irq_ops = {
26393+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26394 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26395 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26396 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26397@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26398 #endif
26399 };
26400
26401-__visible struct pv_cpu_ops pv_cpu_ops = {
26402+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26403 .cpuid = native_cpuid,
26404 .get_debugreg = native_get_debugreg,
26405 .set_debugreg = native_set_debugreg,
26406@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
26407 .end_context_switch = paravirt_nop,
26408 };
26409
26410-struct pv_apic_ops pv_apic_ops = {
26411+struct pv_apic_ops pv_apic_ops __read_only = {
26412 #ifdef CONFIG_X86_LOCAL_APIC
26413 .startup_ipi_hook = paravirt_nop,
26414 #endif
26415 };
26416
26417-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26418+#ifdef CONFIG_X86_32
26419+#ifdef CONFIG_X86_PAE
26420+/* 64-bit pagetable entries */
26421+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26422+#else
26423 /* 32-bit pagetable entries */
26424 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26425+#endif
26426 #else
26427 /* 64-bit pagetable entries */
26428 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26429 #endif
26430
26431-struct pv_mmu_ops pv_mmu_ops = {
26432+struct pv_mmu_ops pv_mmu_ops __read_only = {
26433
26434 .read_cr2 = native_read_cr2,
26435 .write_cr2 = native_write_cr2,
26436@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26437 .make_pud = PTE_IDENT,
26438
26439 .set_pgd = native_set_pgd,
26440+ .set_pgd_batched = native_set_pgd_batched,
26441 #endif
26442 #endif /* PAGETABLE_LEVELS >= 3 */
26443
26444@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26445 },
26446
26447 .set_fixmap = native_set_fixmap,
26448+
26449+#ifdef CONFIG_PAX_KERNEXEC
26450+ .pax_open_kernel = native_pax_open_kernel,
26451+ .pax_close_kernel = native_pax_close_kernel,
26452+#endif
26453+
26454 };
26455
26456 EXPORT_SYMBOL_GPL(pv_time_ops);
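
The recurring __read_only annotations on pv_info, pv_init_ops, pv_irq_ops, pv_cpu_ops, pv_mmu_ops and friends all serve one purpose: these function-pointer tables are set once and never legitimately rewritten, so pinning them to read-only pages denies a memory-corruption bug the classic trick of redirecting a paravirt hook. A userspace analogue of the idea, where const lands the table in .rodata:

    /* A function-pointer table that cannot be redirected after link time. */
    #include <stdio.h>

    struct ops { void (*greet)(void); };

    static void hello(void) { puts("hello"); }

    static const struct ops pv_ops_demo = { .greet = hello };  /* .rodata */

    int main(void)
    {
        pv_ops_demo.greet();
        /* pv_ops_demo.greet = NULL;  -- rejected by the compiler; a forced
         * write through a cast would fault on the read-only page */
        return 0;
    }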
26457diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26458index 299d493..2ccb0ee 100644
26459--- a/arch/x86/kernel/pci-calgary_64.c
26460+++ b/arch/x86/kernel/pci-calgary_64.c
26461@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
26462 tce_space = be64_to_cpu(readq(target));
26463 tce_space = tce_space & TAR_SW_BITS;
26464
26465- tce_space = tce_space & (~specified_table_size);
26466+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26467 info->tce_space = (u64 *)__va(tce_space);
26468 }
26469 }
26470diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26471index 35ccf75..7a15747 100644
26472--- a/arch/x86/kernel/pci-iommu_table.c
26473+++ b/arch/x86/kernel/pci-iommu_table.c
26474@@ -2,7 +2,7 @@
26475 #include <asm/iommu_table.h>
26476 #include <linux/string.h>
26477 #include <linux/kallsyms.h>
26478-
26479+#include <linux/sched.h>
26480
26481 #define DEBUG 1
26482
26483diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26484index 6c483ba..d10ce2f 100644
26485--- a/arch/x86/kernel/pci-swiotlb.c
26486+++ b/arch/x86/kernel/pci-swiotlb.c
26487@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26488 void *vaddr, dma_addr_t dma_addr,
26489 struct dma_attrs *attrs)
26490 {
26491- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26492+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26493 }
26494
26495 static struct dma_map_ops swiotlb_dma_ops = {
26496diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
26497index ca7f0d5..8996469 100644
26498--- a/arch/x86/kernel/preempt.S
26499+++ b/arch/x86/kernel/preempt.S
26500@@ -3,12 +3,14 @@
26501 #include <asm/dwarf2.h>
26502 #include <asm/asm.h>
26503 #include <asm/calling.h>
26504+#include <asm/alternative-asm.h>
26505
26506 ENTRY(___preempt_schedule)
26507 CFI_STARTPROC
26508 SAVE_ALL
26509 call preempt_schedule
26510 RESTORE_ALL
26511+ pax_force_retaddr
26512 ret
26513 CFI_ENDPROC
26514
26515@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
26516 SAVE_ALL
26517 call preempt_schedule_context
26518 RESTORE_ALL
26519+ pax_force_retaddr
26520 ret
26521 CFI_ENDPROC
26522
26523diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26524index 3fb8d95..254dc51 100644
26525--- a/arch/x86/kernel/process.c
26526+++ b/arch/x86/kernel/process.c
26527@@ -36,7 +36,8 @@
26528 * section. Since TSS's are completely CPU-local, we want them
26529 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26530 */
26531-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26532+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26533+EXPORT_SYMBOL(init_tss);
26534
26535 #ifdef CONFIG_X86_64
26536 static DEFINE_PER_CPU(unsigned char, is_idle);
26537@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
26538 task_xstate_cachep =
26539 kmem_cache_create("task_xstate", xstate_size,
26540 __alignof__(union thread_xstate),
26541- SLAB_PANIC | SLAB_NOTRACK, NULL);
26542+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26543 }
26544
26545 /*
26546@@ -105,7 +106,7 @@ void exit_thread(void)
26547 unsigned long *bp = t->io_bitmap_ptr;
26548
26549 if (bp) {
26550- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26551+ struct tss_struct *tss = init_tss + get_cpu();
26552
26553 t->io_bitmap_ptr = NULL;
26554 clear_thread_flag(TIF_IO_BITMAP);
26555@@ -125,6 +126,9 @@ void flush_thread(void)
26556 {
26557 struct task_struct *tsk = current;
26558
26559+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26560+ loadsegment(gs, 0);
26561+#endif
26562 flush_ptrace_hw_breakpoint(tsk);
26563 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26564 drop_init_fpu(tsk);
26565@@ -271,7 +275,7 @@ static void __exit_idle(void)
26566 void exit_idle(void)
26567 {
26568 /* idle loop has pid 0 */
26569- if (current->pid)
26570+ if (task_pid_nr(current))
26571 return;
26572 __exit_idle();
26573 }
26574@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26575 return ret;
26576 }
26577 #endif
26578-void stop_this_cpu(void *dummy)
26579+__noreturn void stop_this_cpu(void *dummy)
26580 {
26581 local_irq_disable();
26582 /*
26583@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26584 }
26585 early_param("idle", idle_setup);
26586
26587-unsigned long arch_align_stack(unsigned long sp)
26588+#ifdef CONFIG_PAX_RANDKSTACK
26589+void pax_randomize_kstack(struct pt_regs *regs)
26590 {
26591- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26592- sp -= get_random_int() % 8192;
26593- return sp & ~0xf;
26594-}
26595+ struct thread_struct *thread = &current->thread;
26596+ unsigned long time;
26597
26598-unsigned long arch_randomize_brk(struct mm_struct *mm)
26599-{
26600- unsigned long range_end = mm->brk + 0x02000000;
26601- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26602-}
26603+ if (!randomize_va_space)
26604+ return;
26605+
26606+ if (v8086_mode(regs))
26607+ return;
26608
26609+ rdtscl(time);
26610+
26611+ /* P4 seems to return a 0 LSB, ignore it */
26612+#ifdef CONFIG_MPENTIUM4
26613+ time &= 0x3EUL;
26614+ time <<= 2;
26615+#elif defined(CONFIG_X86_64)
26616+ time &= 0xFUL;
26617+ time <<= 4;
26618+#else
26619+ time &= 0x1FUL;
26620+ time <<= 3;
26621+#endif
26622+
26623+ thread->sp0 ^= time;
26624+ load_sp0(init_tss + smp_processor_id(), thread);
26625+
26626+#ifdef CONFIG_X86_64
26627+ this_cpu_write(kernel_stack, thread->sp0);
26628+#endif
26629+}
26630+#endif
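
pax_randomize_kstack() above replaces the generic arch_align_stack()/arch_randomize_brk(): on each relevant kernel entry it XORs a few low TSC bits into thread->sp0, keeping 16-byte alignment (and skipping the Pentium 4's stuck-at-zero LSB on 32-bit). The mixing step in isolation, as a userspace sketch with the x86-64 masks and a made-up stack address; the real code also reloads the TSS and the per-CPU kernel_stack afterwards:

    /* Sketch of the sp0 mixing step (values made up). */
    #include <stdint.h>
    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
        uint64_t sp0 = 0xffff880012345000ULL;   /* pretend stack top */
        uint64_t t = __rdtsc();

        t &= 0xFULL;     /* keep 4 low bits of the timestamp */
        t <<= 4;         /* preserve 16-byte alignment: vary bits 4..7 */

        sp0 ^= t;
        printf("sp0 -> %#llx\n", (unsigned long long)sp0);
        return 0;
    }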
26631diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26632index 0de43e9..056b840 100644
26633--- a/arch/x86/kernel/process_32.c
26634+++ b/arch/x86/kernel/process_32.c
26635@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26636 unsigned long thread_saved_pc(struct task_struct *tsk)
26637 {
26638 return ((unsigned long *)tsk->thread.sp)[3];
26639+//XXX return tsk->thread.eip;
26640 }
26641
26642 void __show_regs(struct pt_regs *regs, int all)
26643@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26644 unsigned long sp;
26645 unsigned short ss, gs;
26646
26647- if (user_mode_vm(regs)) {
26648+ if (user_mode(regs)) {
26649 sp = regs->sp;
26650 ss = regs->ss & 0xffff;
26651- gs = get_user_gs(regs);
26652 } else {
26653 sp = kernel_stack_pointer(regs);
26654 savesegment(ss, ss);
26655- savesegment(gs, gs);
26656 }
26657+ gs = get_user_gs(regs);
26658
26659 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26660 (u16)regs->cs, regs->ip, regs->flags,
26661- smp_processor_id());
26662+ raw_smp_processor_id());
26663 print_symbol("EIP is at %s\n", regs->ip);
26664
26665 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26666@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
26667 int copy_thread(unsigned long clone_flags, unsigned long sp,
26668 unsigned long arg, struct task_struct *p)
26669 {
26670- struct pt_regs *childregs = task_pt_regs(p);
26671+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26672 struct task_struct *tsk;
26673 int err;
26674
26675 p->thread.sp = (unsigned long) childregs;
26676 p->thread.sp0 = (unsigned long) (childregs+1);
26677+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26678
26679 if (unlikely(p->flags & PF_KTHREAD)) {
26680 /* kernel thread */
26681 memset(childregs, 0, sizeof(struct pt_regs));
26682 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26683- task_user_gs(p) = __KERNEL_STACK_CANARY;
26684- childregs->ds = __USER_DS;
26685- childregs->es = __USER_DS;
26686+ savesegment(gs, childregs->gs);
26687+ childregs->ds = __KERNEL_DS;
26688+ childregs->es = __KERNEL_DS;
26689 childregs->fs = __KERNEL_PERCPU;
26690 childregs->bx = sp; /* function */
26691 childregs->bp = arg;
26692@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26693 struct thread_struct *prev = &prev_p->thread,
26694 *next = &next_p->thread;
26695 int cpu = smp_processor_id();
26696- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26697+ struct tss_struct *tss = init_tss + cpu;
26698 fpu_switch_t fpu;
26699
26700 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26701@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26702 */
26703 lazy_save_gs(prev->gs);
26704
26705+#ifdef CONFIG_PAX_MEMORY_UDEREF
26706+ __set_fs(task_thread_info(next_p)->addr_limit);
26707+#endif
26708+
26709 /*
26710 * Load the per-thread Thread-Local Storage descriptor.
26711 */
26712@@ -314,6 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26713 */
26714 arch_end_context_switch(next_p);
26715
26716+ this_cpu_write(current_task, next_p);
26717+ this_cpu_write(current_tinfo, &next_p->tinfo);
26718+
26719 /*
26720 * Restore %gs if needed (which is common)
26721 */
26722@@ -322,8 +330,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26723
26724 switch_fpu_finish(next_p, fpu);
26725
26726- this_cpu_write(current_task, next_p);
26727-
26728 return prev_p;
26729 }
26730
26731@@ -353,4 +359,3 @@ unsigned long get_wchan(struct task_struct *p)
26732 } while (count++ < 16);
26733 return 0;
26734 }
26735-
26736diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26737index 9c0280f..5bbb1c0 100644
26738--- a/arch/x86/kernel/process_64.c
26739+++ b/arch/x86/kernel/process_64.c
26740@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26741 struct pt_regs *childregs;
26742 struct task_struct *me = current;
26743
26744- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26745+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26746 childregs = task_pt_regs(p);
26747 p->thread.sp = (unsigned long) childregs;
26748 p->thread.usersp = me->thread.usersp;
26749+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26750 set_tsk_thread_flag(p, TIF_FORK);
26751 p->thread.fpu_counter = 0;
26752 p->thread.io_bitmap_ptr = NULL;
26753@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26754 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26755 savesegment(es, p->thread.es);
26756 savesegment(ds, p->thread.ds);
26757+ savesegment(ss, p->thread.ss);
26758+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26759 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26760
26761 if (unlikely(p->flags & PF_KTHREAD)) {
26762@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26763 struct thread_struct *prev = &prev_p->thread;
26764 struct thread_struct *next = &next_p->thread;
26765 int cpu = smp_processor_id();
26766- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26767+ struct tss_struct *tss = init_tss + cpu;
26768 unsigned fsindex, gsindex;
26769 fpu_switch_t fpu;
26770
26771@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26772 if (unlikely(next->ds | prev->ds))
26773 loadsegment(ds, next->ds);
26774
26775+ savesegment(ss, prev->ss);
26776+ if (unlikely(next->ss != prev->ss))
26777+ loadsegment(ss, next->ss);
26778
26779 /* We must save %fs and %gs before load_TLS() because
26780 * %fs and %gs may be cleared by load_TLS().
26781@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26782 prev->usersp = this_cpu_read(old_rsp);
26783 this_cpu_write(old_rsp, next->usersp);
26784 this_cpu_write(current_task, next_p);
26785+ this_cpu_write(current_tinfo, &next_p->tinfo);
26786
26787 /*
26788 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26789@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26790 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26791 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26792
26793- this_cpu_write(kernel_stack,
26794- (unsigned long)task_stack_page(next_p) +
26795- THREAD_SIZE - KERNEL_STACK_OFFSET);
26796+ this_cpu_write(kernel_stack, next->sp0);
26797
26798 /*
26799 * Now maybe reload the debug registers and handle I/O bitmaps
26800@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p)
26801 if (!p || p == current || p->state == TASK_RUNNING)
26802 return 0;
26803 stack = (unsigned long)task_stack_page(p);
26804- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26805+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26806 return 0;
26807 fp = *(u64 *)(p->thread.sp);
26808 do {
26809- if (fp < (unsigned long)stack ||
26810- fp >= (unsigned long)stack+THREAD_SIZE)
26811+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26812 return 0;
26813 ip = *(u64 *)(fp+8);
26814 if (!in_sched_functions(ip))
26815diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26816index 7461f50..1334029 100644
26817--- a/arch/x86/kernel/ptrace.c
26818+++ b/arch/x86/kernel/ptrace.c
26819@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26820 {
26821 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
26822 unsigned long sp = (unsigned long)&regs->sp;
26823- struct thread_info *tinfo;
26824
26825- if (context == (sp & ~(THREAD_SIZE - 1)))
26826+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26827 return sp;
26828
26829- tinfo = (struct thread_info *)context;
26830- if (tinfo->previous_esp)
26831- return tinfo->previous_esp;
26832+ sp = *(unsigned long *)context;
26833+ if (sp)
26834+ return sp;
26835
26836 return (unsigned long)regs;
26837 }
26838@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
26839 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26840 {
26841 int i;
26842- int dr7 = 0;
26843+ unsigned long dr7 = 0;
26844 struct arch_hw_breakpoint *info;
26845
26846 for (i = 0; i < HBP_NUM; i++) {
26847@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
26848 unsigned long addr, unsigned long data)
26849 {
26850 int ret;
26851- unsigned long __user *datap = (unsigned long __user *)data;
26852+ unsigned long __user *datap = (__force unsigned long __user *)data;
26853
26854 switch (request) {
26855 /* read the word at location addr in the USER area. */
26856@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
26857 if ((int) addr < 0)
26858 return -EIO;
26859 ret = do_get_thread_area(child, addr,
26860- (struct user_desc __user *)data);
26861+ (__force struct user_desc __user *) data);
26862 break;
26863
26864 case PTRACE_SET_THREAD_AREA:
26865 if ((int) addr < 0)
26866 return -EIO;
26867 ret = do_set_thread_area(child, addr,
26868- (struct user_desc __user *)data, 0);
26869+ (__force struct user_desc __user *) data, 0);
26870 break;
26871 #endif
26872
26873@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26874
26875 #ifdef CONFIG_X86_64
26876
26877-static struct user_regset x86_64_regsets[] __read_mostly = {
26878+static user_regset_no_const x86_64_regsets[] __read_only = {
26879 [REGSET_GENERAL] = {
26880 .core_note_type = NT_PRSTATUS,
26881 .n = sizeof(struct user_regs_struct) / sizeof(long),
26882@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
26883 #endif /* CONFIG_X86_64 */
26884
26885 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26886-static struct user_regset x86_32_regsets[] __read_mostly = {
26887+static user_regset_no_const x86_32_regsets[] __read_only = {
26888 [REGSET_GENERAL] = {
26889 .core_note_type = NT_PRSTATUS,
26890 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26891@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
26892 */
26893 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26894
26895-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26896+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26897 {
26898 #ifdef CONFIG_X86_64
26899 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26900@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26901 memset(info, 0, sizeof(*info));
26902 info->si_signo = SIGTRAP;
26903 info->si_code = si_code;
26904- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26905+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26906 }
26907
26908 void user_single_step_siginfo(struct task_struct *tsk,
26909@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
26910 # define IS_IA32 0
26911 #endif
26912
26913+#ifdef CONFIG_GRKERNSEC_SETXID
26914+extern void gr_delayed_cred_worker(void);
26915+#endif
26916+
26917 /*
26918 * We must return the syscall number to actually look up in the table.
26919 * This can be -1L to skip running any syscall at all.
26920@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
26921
26922 user_exit();
26923
26924+#ifdef CONFIG_GRKERNSEC_SETXID
26925+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26926+ gr_delayed_cred_worker();
26927+#endif
26928+
26929 /*
26930 * If we stepped into a sysenter/syscall insn, it trapped in
26931 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26932@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26933 */
26934 user_exit();
26935
26936+#ifdef CONFIG_GRKERNSEC_SETXID
26937+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26938+ gr_delayed_cred_worker();
26939+#endif
26940+
26941 audit_syscall_exit(regs);
26942
26943 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
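
The two GRKERNSEC_SETXID hooks added above defer a credential change to the next syscall boundary: the setxid path marks the thread, and syscall_trace_enter()/syscall_trace_leave() apply the change through an atomic test-and-clear so the worker runs exactly once per pending change. A minimal userspace sketch of that pattern, with C11 atomics standing in for the kernel's thread-flag helpers (all names below are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long thread_flags;
#define TIF_PENDING_CRED (1UL << 0)

static void delayed_cred_worker(void)   /* stands in for gr_delayed_cred_worker() */
{
        printf("applying deferred credential change\n");
}

static void syscall_boundary(void)
{
        /* atomic test-and-clear: only one boundary crossing runs the worker */
        if (atomic_fetch_and(&thread_flags, ~TIF_PENDING_CRED) & TIF_PENDING_CRED)
                delayed_cred_worker();
}

int main(void)
{
        atomic_fetch_or(&thread_flags, TIF_PENDING_CRED); /* setter marks the thread */
        syscall_boundary();   /* worker runs once */
        syscall_boundary();   /* flag already clear: no-op */
        return 0;
}
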
26944diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26945index 2f355d2..e75ed0a 100644
26946--- a/arch/x86/kernel/pvclock.c
26947+++ b/arch/x86/kernel/pvclock.c
26948@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26949 reset_hung_task_detector();
26950 }
26951
26952-static atomic64_t last_value = ATOMIC64_INIT(0);
26953+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26954
26955 void pvclock_resume(void)
26956 {
26957- atomic64_set(&last_value, 0);
26958+ atomic64_set_unchecked(&last_value, 0);
26959 }
26960
26961 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26962@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26963 * updating at the same time, and one of them could be slightly behind,
26964 * making the assumption that last_value always go forward fail to hold.
26965 */
26966- last = atomic64_read(&last_value);
26967+ last = atomic64_read_unchecked(&last_value);
26968 do {
26969 if (ret < last)
26970 return last;
26971- last = atomic64_cmpxchg(&last_value, last, ret);
26972+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26973 } while (unlikely(last != ret));
26974
26975 return ret;
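
The last_value rework above keeps the clamp that makes pvclock reads monotonic across CPUs while opting the counter out of refcount overflow checking (it is a timestamp, not a reference count). A userspace sketch of the same compare-and-swap clamp, using C11 atomics in place of atomic64_cmpxchg_unchecked() (illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Return max(raw, last_value) and publish raw if it is the new maximum,
 * so concurrent readers never observe the clock going backwards. */
static uint64_t monotonic_read(uint64_t raw)
{
        uint64_t last = atomic_load(&last_value);

        while (raw > last) {
                /* on failure, `last` is reloaded with the current value */
                if (atomic_compare_exchange_weak(&last_value, &last, raw))
                        return raw;
        }
        return last;    /* another reader already published >= raw */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
        printf("%llu\n", (unsigned long long)monotonic_read(90));  /* clamped: 100 */
        return 0;
}
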
26976diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26977index c752cb4..866c432 100644
26978--- a/arch/x86/kernel/reboot.c
26979+++ b/arch/x86/kernel/reboot.c
26980@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26981
26982 void __noreturn machine_real_restart(unsigned int type)
26983 {
26984+
26985+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26986+ struct desc_struct *gdt;
26987+#endif
26988+
26989 local_irq_disable();
26990
26991 /*
26992@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
26993
26994 /* Jump to the identity-mapped low memory code */
26995 #ifdef CONFIG_X86_32
26996- asm volatile("jmpl *%0" : :
26997+
26998+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26999+ gdt = get_cpu_gdt_table(smp_processor_id());
27000+ pax_open_kernel();
27001+#ifdef CONFIG_PAX_MEMORY_UDEREF
27002+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27003+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27004+ loadsegment(ds, __KERNEL_DS);
27005+ loadsegment(es, __KERNEL_DS);
27006+ loadsegment(ss, __KERNEL_DS);
27007+#endif
27008+#ifdef CONFIG_PAX_KERNEXEC
27009+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27010+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27011+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27012+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27013+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27014+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27015+#endif
27016+ pax_close_kernel();
27017+#endif
27018+
27019+ asm volatile("ljmpl *%0" : :
27020 "rm" (real_mode_header->machine_real_restart_asm),
27021 "a" (type));
27022 #else
27023@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27024 * try to force a triple fault and then cycle between hitting the keyboard
27025 * controller and doing that
27026 */
27027-static void native_machine_emergency_restart(void)
27028+static void __noreturn native_machine_emergency_restart(void)
27029 {
27030 int i;
27031 int attempt = 0;
27032@@ -593,13 +620,13 @@ void native_machine_shutdown(void)
27033 #endif
27034 }
27035
27036-static void __machine_emergency_restart(int emergency)
27037+static void __noreturn __machine_emergency_restart(int emergency)
27038 {
27039 reboot_emergency = emergency;
27040 machine_ops.emergency_restart();
27041 }
27042
27043-static void native_machine_restart(char *__unused)
27044+static void __noreturn native_machine_restart(char *__unused)
27045 {
27046 pr_notice("machine restart\n");
27047
27048@@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused)
27049 __machine_emergency_restart(0);
27050 }
27051
27052-static void native_machine_halt(void)
27053+static void __noreturn native_machine_halt(void)
27054 {
27055 /* Stop other cpus and apics */
27056 machine_shutdown();
27057@@ -618,7 +645,7 @@ static void native_machine_halt(void)
27058 stop_this_cpu(NULL);
27059 }
27060
27061-static void native_machine_power_off(void)
27062+static void __noreturn native_machine_power_off(void)
27063 {
27064 if (pm_power_off) {
27065 if (!reboot_force)
27066@@ -627,9 +654,10 @@ static void native_machine_power_off(void)
27067 }
27068 /* A fallback in case there is no PM info available */
27069 tboot_shutdown(TB_SHUTDOWN_HALT);
27070+ unreachable();
27071 }
27072
27073-struct machine_ops machine_ops = {
27074+struct machine_ops machine_ops __read_only = {
27075 .power_off = native_machine_power_off,
27076 .shutdown = native_machine_shutdown,
27077 .emergency_restart = native_machine_emergency_restart,
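
Marking the halt/restart paths __noreturn above, and ending native_machine_power_off() with unreachable(), tells the compiler these functions never come back, so callers need no dead fall-through code and the optimizer cannot emit one. A small sketch of the same contract using C11 _Noreturn, with exit() standing in for the final halt (illustrative, not the kernel's code):

#include <stdio.h>
#include <stdlib.h>

static _Noreturn void machine_power_off_model(void)
{
        puts("power off requested");
        /* a kernel would end with a halt loop plus unreachable() /
         * __builtin_unreachable(); exit() plays that role here */
        exit(0);
}

int main(void)
{
        machine_power_off_model();
        /* not reached; many compilers warn if code is placed here */
}
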
27078diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27079index c8e41e9..64049ef 100644
27080--- a/arch/x86/kernel/reboot_fixups_32.c
27081+++ b/arch/x86/kernel/reboot_fixups_32.c
27082@@ -57,7 +57,7 @@ struct device_fixup {
27083 unsigned int vendor;
27084 unsigned int device;
27085 void (*reboot_fixup)(struct pci_dev *);
27086-};
27087+} __do_const;
27088
27089 /*
27090 * PCI ids solely used for fixups_table go here
27091diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27092index 3fd2c69..a444264 100644
27093--- a/arch/x86/kernel/relocate_kernel_64.S
27094+++ b/arch/x86/kernel/relocate_kernel_64.S
27095@@ -96,8 +96,7 @@ relocate_kernel:
27096
27097 /* jump to identity mapped page */
27098 addq $(identity_mapped - relocate_kernel), %r8
27099- pushq %r8
27100- ret
27101+ jmp *%r8
27102
27103 identity_mapped:
27104 /* set return address to 0 if not preserving context */
27105diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27106index ce72964..be8aea7 100644
27107--- a/arch/x86/kernel/setup.c
27108+++ b/arch/x86/kernel/setup.c
27109@@ -110,6 +110,7 @@
27110 #include <asm/mce.h>
27111 #include <asm/alternative.h>
27112 #include <asm/prom.h>
27113+#include <asm/boot.h>
27114
27115 /*
27116 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27117@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27118 #endif
27119
27120
27121-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27122-__visible unsigned long mmu_cr4_features;
27123+#ifdef CONFIG_X86_64
27124+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27125+#elif defined(CONFIG_X86_PAE)
27126+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27127 #else
27128-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27129+__visible unsigned long mmu_cr4_features __read_only;
27130 #endif
27131
27132+void set_in_cr4(unsigned long mask)
27133+{
27134+ unsigned long cr4 = read_cr4();
27135+
27136+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27137+ return;
27138+
27139+ pax_open_kernel();
27140+ mmu_cr4_features |= mask;
27141+ pax_close_kernel();
27142+
27143+ if (trampoline_cr4_features)
27144+ *trampoline_cr4_features = mmu_cr4_features;
27145+ cr4 |= mask;
27146+ write_cr4(cr4);
27147+}
27148+EXPORT_SYMBOL(set_in_cr4);
27149+
27150+void clear_in_cr4(unsigned long mask)
27151+{
27152+ unsigned long cr4 = read_cr4();
27153+
27154+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27155+ return;
27156+
27157+ pax_open_kernel();
27158+ mmu_cr4_features &= ~mask;
27159+ pax_close_kernel();
27160+
27161+ if (trampoline_cr4_features)
27162+ *trampoline_cr4_features = mmu_cr4_features;
27163+ cr4 &= ~mask;
27164+ write_cr4(cr4);
27165+}
27166+EXPORT_SYMBOL(clear_in_cr4);
27167+
27168 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27169 int bootloader_type, bootloader_version;
27170
27171@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27172 * area (640->1Mb) as ram even though it is not.
27173 * take them out.
27174 */
27175- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27176+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27177
27178 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27179 }
27180@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27181 /* called before trim_bios_range() to spare extra sanitize */
27182 static void __init e820_add_kernel_range(void)
27183 {
27184- u64 start = __pa_symbol(_text);
27185+ u64 start = __pa_symbol(ktla_ktva(_text));
27186 u64 size = __pa_symbol(_end) - start;
27187
27188 /*
27189@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27190
27191 void __init setup_arch(char **cmdline_p)
27192 {
27193+#ifdef CONFIG_X86_32
27194+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27195+#else
27196 memblock_reserve(__pa_symbol(_text),
27197 (unsigned long)__bss_stop - (unsigned long)_text);
27198+#endif
27199
27200 early_reserve_initrd();
27201
27202@@ -947,14 +990,14 @@ void __init setup_arch(char **cmdline_p)
27203
27204 if (!boot_params.hdr.root_flags)
27205 root_mountflags &= ~MS_RDONLY;
27206- init_mm.start_code = (unsigned long) _text;
27207- init_mm.end_code = (unsigned long) _etext;
27208+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27209+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27210 init_mm.end_data = (unsigned long) _edata;
27211 init_mm.brk = _brk_end;
27212
27213- code_resource.start = __pa_symbol(_text);
27214- code_resource.end = __pa_symbol(_etext)-1;
27215- data_resource.start = __pa_symbol(_etext);
27216+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27217+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27218+ data_resource.start = __pa_symbol(_sdata);
27219 data_resource.end = __pa_symbol(_edata)-1;
27220 bss_resource.start = __pa_symbol(__bss_start);
27221 bss_resource.end = __pa_symbol(__bss_stop)-1;
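
set_in_cr4()/clear_in_cr4() above keep a normally read-only shadow (mmu_cr4_features) in sync with the hardware register: the shadow is updated inside a pax_open_kernel()/pax_close_kernel() window, the trampoline copy is refreshed, and the real register is written last. A userspace model of that shadowed-register discipline, with plain variables standing in for CR4 and empty functions for the write window (names are illustrative):

#include <stdio.h>

static unsigned long hw_cr4;          /* stands in for read_cr4()/write_cr4() */
static unsigned long mmu_cr4_shadow;  /* stands in for mmu_cr4_features */

static void open_shadow(void)  { /* pax_open_kernel(): lift write protection */ }
static void close_shadow(void) { /* pax_close_kernel(): restore it */ }

static void set_in_cr4_model(unsigned long mask)
{
        unsigned long cr4 = hw_cr4;

        if ((cr4 & mask) == mask && cr4 == mmu_cr4_shadow)
                return;                 /* already set and in sync */

        open_shadow();
        mmu_cr4_shadow |= mask;
        close_shadow();

        hw_cr4 = cr4 | mask;            /* write the real register last */
}

int main(void)
{
        set_in_cr4_model(1UL << 9);     /* e.g. an OSFXSR-style feature bit */
        printf("cr4=%#lx shadow=%#lx\n", hw_cr4, mmu_cr4_shadow);
        return 0;
}
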
27222diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27223index 5cdff03..80fa283 100644
27224--- a/arch/x86/kernel/setup_percpu.c
27225+++ b/arch/x86/kernel/setup_percpu.c
27226@@ -21,19 +21,17 @@
27227 #include <asm/cpu.h>
27228 #include <asm/stackprotector.h>
27229
27230-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27231+#ifdef CONFIG_SMP
27232+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27233 EXPORT_PER_CPU_SYMBOL(cpu_number);
27234+#endif
27235
27236-#ifdef CONFIG_X86_64
27237 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27238-#else
27239-#define BOOT_PERCPU_OFFSET 0
27240-#endif
27241
27242 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27243 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27244
27245-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27246+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27247 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27248 };
27249 EXPORT_SYMBOL(__per_cpu_offset);
27250@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27251 {
27252 #ifdef CONFIG_NEED_MULTIPLE_NODES
27253 pg_data_t *last = NULL;
27254- unsigned int cpu;
27255+ int cpu;
27256
27257 for_each_possible_cpu(cpu) {
27258 int node = early_cpu_to_node(cpu);
27259@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27260 {
27261 #ifdef CONFIG_X86_32
27262 struct desc_struct gdt;
27263+ unsigned long base = per_cpu_offset(cpu);
27264
27265- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27266- 0x2 | DESCTYPE_S, 0x8);
27267- gdt.s = 1;
27268+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27269+ 0x83 | DESCTYPE_S, 0xC);
27270 write_gdt_entry(get_cpu_gdt_table(cpu),
27271 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27272 #endif
27273@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27274 /* alrighty, percpu areas up and running */
27275 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27276 for_each_possible_cpu(cpu) {
27277+#ifdef CONFIG_CC_STACKPROTECTOR
27278+#ifdef CONFIG_X86_32
27279+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27280+#endif
27281+#endif
27282 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27283 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27284 per_cpu(cpu_number, cpu) = cpu;
27285@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27286 */
27287 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27288 #endif
27289+#ifdef CONFIG_CC_STACKPROTECTOR
27290+#ifdef CONFIG_X86_32
27291+ if (!cpu)
27292+ per_cpu(stack_canary.canary, cpu) = canary;
27293+#endif
27294+#endif
27295 /*
27296 * Up to this point, the boot CPU has been using .init.data
27297 * area. Reload any changed state for the boot CPU.
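
The repacked per-cpu descriptor above uses 4 KiB granularity (the 0xC flag nibble sets G), so the limit field counts pages and is computed as ((VMALLOC_END - base - 1) >> PAGE_SHIFT) to cover exactly the per-cpu area instead of the old blanket 0xFFFFF. A worked example of that arithmetic (the VMALLOC_END and base values below are assumed, for illustration only):

#include <stdio.h>

#define PAGE_SHIFT  12
#define VMALLOC_END 0xfffff000UL        /* assumed example value */

int main(void)
{
        unsigned long base  = 0xc1000000UL;  /* example per-cpu base */
        unsigned long limit = (VMALLOC_END - base - 1) >> PAGE_SHIFT;
        unsigned long top   = base + ((limit + 1) << PAGE_SHIFT) - 1;

        /* with G=1 the descriptor spans limit+1 pages starting at base */
        printf("limit field %#lx covers %#lx..%#lx\n", limit, base, top);
        return 0;
}
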
27298diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27299index 9e5de68..16c53cb 100644
27300--- a/arch/x86/kernel/signal.c
27301+++ b/arch/x86/kernel/signal.c
27302@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27303 * Align the stack pointer according to the i386 ABI,
27304 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27305 */
27306- sp = ((sp + 4) & -16ul) - 4;
27307+ sp = ((sp - 12) & -16ul) - 4;
27308 #else /* !CONFIG_X86_32 */
27309 sp = round_down(sp, 16) - 8;
27310 #endif
27311@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27312 }
27313
27314 if (current->mm->context.vdso)
27315- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
27316+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
27317 else
27318- restorer = &frame->retcode;
27319+ restorer = (void __user *)&frame->retcode;
27320 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27321 restorer = ksig->ka.sa.sa_restorer;
27322
27323@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27324 * reasons and because gdb uses it as a signature to notice
27325 * signal handler stack frames.
27326 */
27327- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27328+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27329
27330 if (err)
27331 return -EFAULT;
27332@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27333 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27334
27335 /* Set up to return from userspace. */
27336- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
27337+ if (current->mm->context.vdso)
27338+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
27339+ else
27340+ restorer = (void __user *)&frame->retcode;
27341 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27342 restorer = ksig->ka.sa.sa_restorer;
27343 put_user_ex(restorer, &frame->pretcode);
27344@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27345 * reasons and because gdb uses it as a signature to notice
27346 * signal handler stack frames.
27347 */
27348- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27349+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27350 } put_user_catch(err);
27351
27352 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27353@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27354 {
27355 int usig = signr_convert(ksig->sig);
27356 sigset_t *set = sigmask_to_save();
27357- compat_sigset_t *cset = (compat_sigset_t *) set;
27358+ sigset_t sigcopy;
27359+ compat_sigset_t *cset;
27360+
27361+ sigcopy = *set;
27362+
27363+ cset = (compat_sigset_t *) &sigcopy;
27364
27365 /* Set up the stack frame */
27366 if (is_ia32_frame()) {
27367@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27368 } else if (is_x32_frame()) {
27369 return x32_setup_rt_frame(ksig, cset, regs);
27370 } else {
27371- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27372+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27373 }
27374 }
27375
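
The alignment change above swaps ((sp + 4) & -16) - 4 for ((sp - 12) & -16) - 4. Both satisfy the i386 ABI entry condition ((sp + 4) & 15) == 0, but the old form can return sp itself when sp is already suitably aligned, while subtracting first always lands at least 16 bytes below the incoming sp, keeping the frame clear of data at sp. A quick check of the invariant:

#include <stdio.h>

int main(void)
{
        for (unsigned long sp = 0xbffff000UL; sp < 0xbffff010UL; sp++) {
                unsigned long aligned = ((sp - 12) & -16UL) - 4;

                /* check is always 0 and room is always >= 16 */
                printf("sp=%#lx -> %#lx check=%lu room=%lu\n",
                       sp, aligned, (aligned + 4) & 15, sp - aligned);
        }
        return 0;
}
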
27376diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27377index 7c3a5a6..f0a8961 100644
27378--- a/arch/x86/kernel/smp.c
27379+++ b/arch/x86/kernel/smp.c
27380@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27381
27382 __setup("nonmi_ipi", nonmi_ipi_setup);
27383
27384-struct smp_ops smp_ops = {
27385+struct smp_ops smp_ops __read_only = {
27386 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27387 .smp_prepare_cpus = native_smp_prepare_cpus,
27388 .smp_cpus_done = native_smp_cpus_done,
27389diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27390index a32da80..041a4ff 100644
27391--- a/arch/x86/kernel/smpboot.c
27392+++ b/arch/x86/kernel/smpboot.c
27393@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27394
27395 enable_start_cpu0 = 0;
27396
27397-#ifdef CONFIG_X86_32
27398+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27399+ barrier();
27400+
27401 /* switch away from the initial page table */
27402+#ifdef CONFIG_PAX_PER_CPU_PGD
27403+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27404+#else
27405 load_cr3(swapper_pg_dir);
27406+#endif
27407 __flush_tlb_all();
27408-#endif
27409
27410- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27411- barrier();
27412 /*
27413 * Check TSC synchronization with the BP:
27414 */
27415@@ -749,8 +752,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27416 alternatives_enable_smp();
27417
27418 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27419- (THREAD_SIZE + task_stack_page(idle))) - 1);
27420+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27421 per_cpu(current_task, cpu) = idle;
27422+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27423
27424 #ifdef CONFIG_X86_32
27425 /* Stack for startup_32 can be just as for start_secondary onwards */
27426@@ -758,11 +762,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27427 #else
27428 clear_tsk_thread_flag(idle, TIF_FORK);
27429 initial_gs = per_cpu_offset(cpu);
27430- per_cpu(kernel_stack, cpu) =
27431- (unsigned long)task_stack_page(idle) -
27432- KERNEL_STACK_OFFSET + THREAD_SIZE;
27433+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27434 #endif
27435+
27436+ pax_open_kernel();
27437 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27438+ pax_close_kernel();
27439+
27440 initial_code = (unsigned long)start_secondary;
27441 stack_start = idle->thread.sp;
27442
27443@@ -911,6 +917,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27444 /* the FPU context is blank, nobody can own it */
27445 __cpu_disable_lazy_restore(cpu);
27446
27447+#ifdef CONFIG_PAX_PER_CPU_PGD
27448+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27449+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27450+ KERNEL_PGD_PTRS);
27451+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27452+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27453+ KERNEL_PGD_PTRS);
27454+#endif
27455+
27456 err = do_boot_cpu(apicid, cpu, tidle);
27457 if (err) {
27458 pr_debug("do_boot_cpu failed %d\n", err);
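
Under PAX_PER_CPU_PGD every CPU runs on its own page-global directory, so the hunk above clones the kernel half of the reference directory (swapper_pg_dir) into the new CPU's kernel and user PGDs before it boots. A toy model of that clone_pgd_range() step (table size and boundary index below are illustrative, not the kernel's):

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD        8
#define KERNEL_PGD_BOUNDARY 6   /* entries >= this map kernel space */

int main(void)
{
        unsigned long swapper_pg_dir[PTRS_PER_PGD] =
                { 0, 0, 0, 0, 0, 0, 0xAA, 0xBB };
        unsigned long cpu_pgd[PTRS_PER_PGD] = { 0 };

        /* clone_pgd_range(dst + boundary, src + boundary, count) */
        memcpy(cpu_pgd + KERNEL_PGD_BOUNDARY,
               swapper_pg_dir + KERNEL_PGD_BOUNDARY,
               (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) * sizeof(cpu_pgd[0]));

        printf("cpu_pgd[6]=%#lx cpu_pgd[7]=%#lx\n", cpu_pgd[6], cpu_pgd[7]);
        return 0;
}
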
27459diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27460index 9b4d51d..5d28b58 100644
27461--- a/arch/x86/kernel/step.c
27462+++ b/arch/x86/kernel/step.c
27463@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27464 struct desc_struct *desc;
27465 unsigned long base;
27466
27467- seg &= ~7UL;
27468+ seg >>= 3;
27469
27470 mutex_lock(&child->mm->context.lock);
27471- if (unlikely((seg >> 3) >= child->mm->context.size))
27472+ if (unlikely(seg >= child->mm->context.size))
27473 addr = -1L; /* bogus selector, access would fault */
27474 else {
27475 desc = child->mm->context.ldt + seg;
27476@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27477 addr += base;
27478 }
27479 mutex_unlock(&child->mm->context.lock);
27480- }
27481+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27482+ addr = ktla_ktva(addr);
27483
27484 return addr;
27485 }
27486@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27487 unsigned char opcode[15];
27488 unsigned long addr = convert_ip_to_linear(child, regs);
27489
27490+ if (addr == -EINVAL)
27491+ return 0;
27492+
27493 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27494 for (i = 0; i < copied; i++) {
27495 switch (opcode[i]) {
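
The seg >>= 3 change above matches the x86 selector layout: bits 0-1 are the requested privilege level, bit 2 picks GDT (0) or LDT (1), and bits 3-15 index the descriptor table, so shifting by 3 yields the table index directly and the bounds check compares like with like. A small decoder:

#include <stdio.h>

int main(void)
{
        unsigned sel   = 0x073;          /* example selector */
        unsigned rpl   = sel & 3;
        unsigned ti    = (sel >> 2) & 1;
        unsigned index = sel >> 3;

        printf("selector %#x: index=%u table=%s rpl=%u\n",
               sel, index, ti ? "LDT" : "GDT", rpl);
        return 0;
}
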
27496diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27497new file mode 100644
27498index 0000000..5877189
27499--- /dev/null
27500+++ b/arch/x86/kernel/sys_i386_32.c
27501@@ -0,0 +1,189 @@
27502+/*
27503+ * This file contains various random system calls that
27504+ * have a non-standard calling sequence on the Linux/i386
27505+ * platform.
27506+ */
27507+
27508+#include <linux/errno.h>
27509+#include <linux/sched.h>
27510+#include <linux/mm.h>
27511+#include <linux/fs.h>
27512+#include <linux/smp.h>
27513+#include <linux/sem.h>
27514+#include <linux/msg.h>
27515+#include <linux/shm.h>
27516+#include <linux/stat.h>
27517+#include <linux/syscalls.h>
27518+#include <linux/mman.h>
27519+#include <linux/file.h>
27520+#include <linux/utsname.h>
27521+#include <linux/ipc.h>
27522+#include <linux/elf.h>
27523+
27524+#include <linux/uaccess.h>
27525+#include <linux/unistd.h>
27526+
27527+#include <asm/syscalls.h>
27528+
27529+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27530+{
27531+ unsigned long pax_task_size = TASK_SIZE;
27532+
27533+#ifdef CONFIG_PAX_SEGMEXEC
27534+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27535+ pax_task_size = SEGMEXEC_TASK_SIZE;
27536+#endif
27537+
27538+ if (flags & MAP_FIXED)
27539+ if (len > pax_task_size || addr > pax_task_size - len)
27540+ return -EINVAL;
27541+
27542+ return 0;
27543+}
27544+
27545+/*
27546+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27547+ */
27548+static unsigned long get_align_mask(void)
27549+{
27550+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27551+ return 0;
27552+
27553+ if (!(current->flags & PF_RANDOMIZE))
27554+ return 0;
27555+
27556+ return va_align.mask;
27557+}
27558+
27559+unsigned long
27560+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27561+ unsigned long len, unsigned long pgoff, unsigned long flags)
27562+{
27563+ struct mm_struct *mm = current->mm;
27564+ struct vm_area_struct *vma;
27565+ unsigned long pax_task_size = TASK_SIZE;
27566+ struct vm_unmapped_area_info info;
27567+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27568+
27569+#ifdef CONFIG_PAX_SEGMEXEC
27570+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27571+ pax_task_size = SEGMEXEC_TASK_SIZE;
27572+#endif
27573+
27574+ pax_task_size -= PAGE_SIZE;
27575+
27576+ if (len > pax_task_size)
27577+ return -ENOMEM;
27578+
27579+ if (flags & MAP_FIXED)
27580+ return addr;
27581+
27582+#ifdef CONFIG_PAX_RANDMMAP
27583+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27584+#endif
27585+
27586+ if (addr) {
27587+ addr = PAGE_ALIGN(addr);
27588+ if (pax_task_size - len >= addr) {
27589+ vma = find_vma(mm, addr);
27590+ if (check_heap_stack_gap(vma, addr, len, offset))
27591+ return addr;
27592+ }
27593+ }
27594+
27595+ info.flags = 0;
27596+ info.length = len;
27597+ info.align_mask = filp ? get_align_mask() : 0;
27598+ info.align_offset = pgoff << PAGE_SHIFT;
27599+ info.threadstack_offset = offset;
27600+
27601+#ifdef CONFIG_PAX_PAGEEXEC
27602+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27603+ info.low_limit = 0x00110000UL;
27604+ info.high_limit = mm->start_code;
27605+
27606+#ifdef CONFIG_PAX_RANDMMAP
27607+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27608+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27609+#endif
27610+
27611+ if (info.low_limit < info.high_limit) {
27612+ addr = vm_unmapped_area(&info);
27613+ if (!IS_ERR_VALUE(addr))
27614+ return addr;
27615+ }
27616+ } else
27617+#endif
27618+
27619+ info.low_limit = mm->mmap_base;
27620+ info.high_limit = pax_task_size;
27621+
27622+ return vm_unmapped_area(&info);
27623+}
27624+
27625+unsigned long
27626+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27627+ const unsigned long len, const unsigned long pgoff,
27628+ const unsigned long flags)
27629+{
27630+ struct vm_area_struct *vma;
27631+ struct mm_struct *mm = current->mm;
27632+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27633+ struct vm_unmapped_area_info info;
27634+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27635+
27636+#ifdef CONFIG_PAX_SEGMEXEC
27637+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27638+ pax_task_size = SEGMEXEC_TASK_SIZE;
27639+#endif
27640+
27641+ pax_task_size -= PAGE_SIZE;
27642+
27643+ /* requested length too big for entire address space */
27644+ if (len > pax_task_size)
27645+ return -ENOMEM;
27646+
27647+ if (flags & MAP_FIXED)
27648+ return addr;
27649+
27650+#ifdef CONFIG_PAX_PAGEEXEC
27651+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27652+ goto bottomup;
27653+#endif
27654+
27655+#ifdef CONFIG_PAX_RANDMMAP
27656+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27657+#endif
27658+
27659+ /* requesting a specific address */
27660+ if (addr) {
27661+ addr = PAGE_ALIGN(addr);
27662+ if (pax_task_size - len >= addr) {
27663+ vma = find_vma(mm, addr);
27664+ if (check_heap_stack_gap(vma, addr, len, offset))
27665+ return addr;
27666+ }
27667+ }
27668+
27669+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27670+ info.length = len;
27671+ info.low_limit = PAGE_SIZE;
27672+ info.high_limit = mm->mmap_base;
27673+ info.align_mask = filp ? get_align_mask() : 0;
27674+ info.align_offset = pgoff << PAGE_SHIFT;
27675+ info.threadstack_offset = offset;
27676+
27677+ addr = vm_unmapped_area(&info);
27678+ if (!(addr & ~PAGE_MASK))
27679+ return addr;
27680+ VM_BUG_ON(addr != -ENOMEM);
27681+
27682+bottomup:
27683+ /*
27684+ * A failed mmap() very likely causes application failure,
27685+ * so fall back to the bottom-up function here. This scenario
27686+ * can happen with large stack limits and large mmap()
27687+ * allocations.
27688+ */
27689+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27690+}
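
i386_mmap_check() above validates MAP_FIXED requests as (len > task_size || addr > task_size - len) rather than the naive addr + len > task_size, because the subtraction form cannot wrap around zero when len is attacker-controlled. A demonstration (the cast models the 32-bit truncation that would occur on a native i386 build):

#include <stdio.h>

int main(void)
{
        unsigned long task_size = 0xc0000000UL;
        unsigned long addr = 0xb0000000UL, len = 0x60000000UL;

        /* naive form: addr + len wraps in 32-bit arithmetic and passes */
        int naive_ok = (unsigned int)(addr + len) <= task_size;
        /* overflow-safe form used by the patch: correctly rejects */
        int safe_ok  = !(len > task_size || addr > task_size - len);

        printf("naive=%d (wrongly allows) safe=%d (rejects)\n",
               naive_ok, safe_ok);
        return 0;
}
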
27691diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27692index 30277e2..5664a29 100644
27693--- a/arch/x86/kernel/sys_x86_64.c
27694+++ b/arch/x86/kernel/sys_x86_64.c
27695@@ -81,8 +81,8 @@ out:
27696 return error;
27697 }
27698
27699-static void find_start_end(unsigned long flags, unsigned long *begin,
27700- unsigned long *end)
27701+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27702+ unsigned long *begin, unsigned long *end)
27703 {
27704 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27705 unsigned long new_begin;
27706@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27707 *begin = new_begin;
27708 }
27709 } else {
27710- *begin = current->mm->mmap_legacy_base;
27711+ *begin = mm->mmap_legacy_base;
27712 *end = TASK_SIZE;
27713 }
27714 }
27715@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27716 struct vm_area_struct *vma;
27717 struct vm_unmapped_area_info info;
27718 unsigned long begin, end;
27719+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27720
27721 if (flags & MAP_FIXED)
27722 return addr;
27723
27724- find_start_end(flags, &begin, &end);
27725+ find_start_end(mm, flags, &begin, &end);
27726
27727 if (len > end)
27728 return -ENOMEM;
27729
27730+#ifdef CONFIG_PAX_RANDMMAP
27731+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27732+#endif
27733+
27734 if (addr) {
27735 addr = PAGE_ALIGN(addr);
27736 vma = find_vma(mm, addr);
27737- if (end - len >= addr &&
27738- (!vma || addr + len <= vma->vm_start))
27739+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27740 return addr;
27741 }
27742
27743@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27744 info.high_limit = end;
27745 info.align_mask = filp ? get_align_mask() : 0;
27746 info.align_offset = pgoff << PAGE_SHIFT;
27747+ info.threadstack_offset = offset;
27748 return vm_unmapped_area(&info);
27749 }
27750
27751@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27752 struct mm_struct *mm = current->mm;
27753 unsigned long addr = addr0;
27754 struct vm_unmapped_area_info info;
27755+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27756
27757 /* requested length too big for entire address space */
27758 if (len > TASK_SIZE)
27759@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27760 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27761 goto bottomup;
27762
27763+#ifdef CONFIG_PAX_RANDMMAP
27764+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27765+#endif
27766+
27767 /* requesting a specific address */
27768 if (addr) {
27769 addr = PAGE_ALIGN(addr);
27770 vma = find_vma(mm, addr);
27771- if (TASK_SIZE - len >= addr &&
27772- (!vma || addr + len <= vma->vm_start))
27773+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27774 return addr;
27775 }
27776
27777@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27778 info.high_limit = mm->mmap_base;
27779 info.align_mask = filp ? get_align_mask() : 0;
27780 info.align_offset = pgoff << PAGE_SHIFT;
27781+ info.threadstack_offset = offset;
27782 addr = vm_unmapped_area(&info);
27783 if (!(addr & ~PAGE_MASK))
27784 return addr;
27785diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27786index 91a4496..bb87552 100644
27787--- a/arch/x86/kernel/tboot.c
27788+++ b/arch/x86/kernel/tboot.c
27789@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27790
27791 void tboot_shutdown(u32 shutdown_type)
27792 {
27793- void (*shutdown)(void);
27794+ void (* __noreturn shutdown)(void);
27795
27796 if (!tboot_enabled())
27797 return;
27798@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27799
27800 switch_to_tboot_pt();
27801
27802- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27803+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27804 shutdown();
27805
27806 /* should not reach here */
27807@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27808 return -ENODEV;
27809 }
27810
27811-static atomic_t ap_wfs_count;
27812+static atomic_unchecked_t ap_wfs_count;
27813
27814 static int tboot_wait_for_aps(int num_aps)
27815 {
27816@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27817 {
27818 switch (action) {
27819 case CPU_DYING:
27820- atomic_inc(&ap_wfs_count);
27821+ atomic_inc_unchecked(&ap_wfs_count);
27822 if (num_online_cpus() == 1)
27823- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27824+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27825 return NOTIFY_BAD;
27826 break;
27827 }
27828@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27829
27830 tboot_create_trampoline();
27831
27832- atomic_set(&ap_wfs_count, 0);
27833+ atomic_set_unchecked(&ap_wfs_count, 0);
27834 register_hotcpu_notifier(&tboot_cpu_notifier);
27835
27836 #ifdef CONFIG_DEBUG_FS
27837diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27838index 24d3c91..d06b473 100644
27839--- a/arch/x86/kernel/time.c
27840+++ b/arch/x86/kernel/time.c
27841@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27842 {
27843 unsigned long pc = instruction_pointer(regs);
27844
27845- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27846+ if (!user_mode(regs) && in_lock_functions(pc)) {
27847 #ifdef CONFIG_FRAME_POINTER
27848- return *(unsigned long *)(regs->bp + sizeof(long));
27849+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27850 #else
27851 unsigned long *sp =
27852 (unsigned long *)kernel_stack_pointer(regs);
27853@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27854 * or above a saved flags. Eflags has bits 22-31 zero,
27855 * kernel addresses don't.
27856 */
27857+
27858+#ifdef CONFIG_PAX_KERNEXEC
27859+ return ktla_ktva(sp[0]);
27860+#else
27861 if (sp[0] >> 22)
27862 return sp[0];
27863 if (sp[1] >> 22)
27864 return sp[1];
27865 #endif
27866+
27867+#endif
27868 }
27869 return pc;
27870 }
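
Outside the KERNEXEC branch, profile_pc() above still relies on the classic heuristic that a saved EFLAGS word has bits 22-31 architecturally zero while 32-bit kernel text addresses do not, to tell a flags word apart from a return address on the stack. The predicate in isolation:

#include <stdio.h>

static int looks_like_kernel_address(unsigned long word)
{
        return (word >> 22) != 0;       /* any of bits 22 and up set */
}

int main(void)
{
        printf("%d\n", looks_like_kernel_address(0x00000246UL)); /* EFLAGS: 0 */
        printf("%d\n", looks_like_kernel_address(0xc10031c0UL)); /* text:   1 */
        return 0;
}
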
27871diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27872index f7fec09..9991981 100644
27873--- a/arch/x86/kernel/tls.c
27874+++ b/arch/x86/kernel/tls.c
27875@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27876 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27877 return -EINVAL;
27878
27879+#ifdef CONFIG_PAX_SEGMEXEC
27880+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27881+ return -EINVAL;
27882+#endif
27883+
27884 set_tls_desc(p, idx, &info, 1);
27885
27886 return 0;
27887@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27888
27889 if (kbuf)
27890 info = kbuf;
27891- else if (__copy_from_user(infobuf, ubuf, count))
27892+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27893 return -EFAULT;
27894 else
27895 info = infobuf;
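
The regset_tls_set() fix above bounds the user-supplied count by the destination buffer before copying; without it, __copy_from_user() would be free to write past infobuf. A userspace model of the bounded-copy pattern (memcpy stands in for the user copy; names are illustrative):

#include <stdio.h>
#include <string.h>

static int bounded_copy(void *dst, size_t dstsz, const void *src, size_t count)
{
        if (count > dstsz)
                return -1;              /* the kernel returns -EFAULT here */
        memcpy(dst, src, count);        /* stands in for __copy_from_user() */
        return 0;
}

int main(void)
{
        char infobuf[64], payload[128] = { 0 };

        printf("%d\n", bounded_copy(infobuf, sizeof infobuf, payload, 32));  /* 0 */
        printf("%d\n", bounded_copy(infobuf, sizeof infobuf, payload, 128)); /* -1 */
        return 0;
}
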
27896diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27897index 1c113db..287b42e 100644
27898--- a/arch/x86/kernel/tracepoint.c
27899+++ b/arch/x86/kernel/tracepoint.c
27900@@ -9,11 +9,11 @@
27901 #include <linux/atomic.h>
27902
27903 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27904-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27905+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27906 (unsigned long) trace_idt_table };
27907
27908 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27909-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27910+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27911
27912 static int trace_irq_vector_refcount;
27913 static DEFINE_MUTEX(irq_vector_mutex);
27914diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27915index 57409f6..b505597 100644
27916--- a/arch/x86/kernel/traps.c
27917+++ b/arch/x86/kernel/traps.c
27918@@ -66,7 +66,7 @@
27919 #include <asm/proto.h>
27920
27921 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27922-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27923+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27924 #else
27925 #include <asm/processor-flags.h>
27926 #include <asm/setup.h>
27927@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
27928 #endif
27929
27930 /* Must be page-aligned because the real IDT is used in a fixmap. */
27931-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27932+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27933
27934 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27935 EXPORT_SYMBOL_GPL(used_vectors);
27936@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27937 }
27938
27939 static int __kprobes
27940-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27941+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27942 struct pt_regs *regs, long error_code)
27943 {
27944 #ifdef CONFIG_X86_32
27945- if (regs->flags & X86_VM_MASK) {
27946+ if (v8086_mode(regs)) {
27947 /*
27948 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27949 * On nmi (interrupt 2), do_trap should not be called.
27950@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27951 return -1;
27952 }
27953 #endif
27954- if (!user_mode(regs)) {
27955+ if (!user_mode_novm(regs)) {
27956 if (!fixup_exception(regs)) {
27957 tsk->thread.error_code = error_code;
27958 tsk->thread.trap_nr = trapnr;
27959+
27960+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27961+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27962+ str = "PAX: suspicious stack segment fault";
27963+#endif
27964+
27965 die(str, regs, error_code);
27966 }
27967+
27968+#ifdef CONFIG_PAX_REFCOUNT
27969+ if (trapnr == X86_TRAP_OF)
27970+ pax_report_refcount_overflow(regs);
27971+#endif
27972+
27973 return 0;
27974 }
27975
27976@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27977 }
27978
27979 static void __kprobes
27980-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27981+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27982 long error_code, siginfo_t *info)
27983 {
27984 struct task_struct *tsk = current;
27985@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27986 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27987 printk_ratelimit()) {
27988 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27989- tsk->comm, tsk->pid, str,
27990+ tsk->comm, task_pid_nr(tsk), str,
27991 regs->ip, regs->sp, error_code);
27992 print_vma_addr(" in ", regs->ip);
27993 pr_cont("\n");
27994@@ -251,6 +263,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27995 tsk->thread.error_code = error_code;
27996 tsk->thread.trap_nr = X86_TRAP_DF;
27997
27998+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27999+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28000+ die("grsec: kernel stack overflow detected", regs, error_code);
28001+#endif
28002+
28003 #ifdef CONFIG_DOUBLEFAULT
28004 df_debug(regs, error_code);
28005 #endif
28006@@ -273,7 +290,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28007 conditional_sti(regs);
28008
28009 #ifdef CONFIG_X86_32
28010- if (regs->flags & X86_VM_MASK) {
28011+ if (v8086_mode(regs)) {
28012 local_irq_enable();
28013 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28014 goto exit;
28015@@ -281,18 +298,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28016 #endif
28017
28018 tsk = current;
28019- if (!user_mode(regs)) {
28020+ if (!user_mode_novm(regs)) {
28021 if (fixup_exception(regs))
28022 goto exit;
28023
28024 tsk->thread.error_code = error_code;
28025 tsk->thread.trap_nr = X86_TRAP_GP;
28026 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28027- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28028+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28029+
28030+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28031+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28032+ die("PAX: suspicious general protection fault", regs, error_code);
28033+ else
28034+#endif
28035+
28036 die("general protection fault", regs, error_code);
28037+ }
28038 goto exit;
28039 }
28040
28041+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28042+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28043+ struct mm_struct *mm = tsk->mm;
28044+ unsigned long limit;
28045+
28046+ down_write(&mm->mmap_sem);
28047+ limit = mm->context.user_cs_limit;
28048+ if (limit < TASK_SIZE) {
28049+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28050+ up_write(&mm->mmap_sem);
28051+ return;
28052+ }
28053+ up_write(&mm->mmap_sem);
28054+ }
28055+#endif
28056+
28057 tsk->thread.error_code = error_code;
28058 tsk->thread.trap_nr = X86_TRAP_GP;
28059
28060@@ -453,7 +494,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
28061 /* It's safe to allow irq's after DR6 has been saved */
28062 preempt_conditional_sti(regs);
28063
28064- if (regs->flags & X86_VM_MASK) {
28065+ if (v8086_mode(regs)) {
28066 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28067 X86_TRAP_DB);
28068 preempt_conditional_cli(regs);
28069@@ -468,7 +509,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
28070 * We already checked v86 mode above, so we can check for kernel mode
28071 * by just checking the CPL of CS.
28072 */
28073- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28074+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28075 tsk->thread.debugreg6 &= ~DR_STEP;
28076 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28077 regs->flags &= ~X86_EFLAGS_TF;
28078@@ -500,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
28079 return;
28080 conditional_sti(regs);
28081
28082- if (!user_mode_vm(regs))
28083+ if (!user_mode(regs))
28084 {
28085 if (!fixup_exception(regs)) {
28086 task->thread.error_code = error_code;
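
The GRKERNSEC_KSTACKOVERFLOW check added to do_double_fault() above treats a #DF whose stack pointer sits within one page below the task's stack allocation as a kernel stack overflow: stacks grow down from tsk->stack + THREAD_SIZE, so an sp just under tsk->stack means the stack ran past its low end. A sketch of the predicate with illustrative constants:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define THREAD_SIZE (2 * PAGE_SIZE)     /* illustrative */

int main(void)
{
        unsigned long stack_base = 0xc1000000UL;            /* tsk->stack */
        unsigned long sp_over = stack_base - 16;            /* past the bottom */
        unsigned long sp_ok   = stack_base + THREAD_SIZE - 128;

        /* unsigned subtraction wraps huge for an in-range sp, so only a
         * pointer within one page below the base matches */
        printf("overflow? %d\n", stack_base - sp_over <= PAGE_SIZE); /* 1 */
        printf("overflow? %d\n", stack_base - sp_ok   <= PAGE_SIZE); /* 0 */
        return 0;
}
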
28087diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28088index cfbe99f..a6e8fa7 100644
28089--- a/arch/x86/kernel/tsc.c
28090+++ b/arch/x86/kernel/tsc.c
28091@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28092 */
28093 smp_wmb();
28094
28095- ACCESS_ONCE(c2n->head) = data;
28096+ ACCESS_ONCE_RW(c2n->head) = data;
28097 }
28098
28099 /*
28100diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28101index 2ed8459..7cf329f 100644
28102--- a/arch/x86/kernel/uprobes.c
28103+++ b/arch/x86/kernel/uprobes.c
28104@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28105 int ret = NOTIFY_DONE;
28106
28107 /* We are only interested in userspace traps */
28108- if (regs && !user_mode_vm(regs))
28109+ if (regs && !user_mode(regs))
28110 return NOTIFY_DONE;
28111
28112 switch (val) {
28113@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28114
28115 if (ncopied != rasize) {
28116 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28117- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28118+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28119
28120 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28121 }
28122diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28123index b9242ba..50c5edd 100644
28124--- a/arch/x86/kernel/verify_cpu.S
28125+++ b/arch/x86/kernel/verify_cpu.S
28126@@ -20,6 +20,7 @@
28127 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28128 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28129 * arch/x86/kernel/head_32.S: processor startup
28130+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28131 *
28132 * verify_cpu, returns the status of longmode and SSE in register %eax.
28133 * 0: Success 1: Failure
28134diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28135index e8edcf5..27f9344 100644
28136--- a/arch/x86/kernel/vm86_32.c
28137+++ b/arch/x86/kernel/vm86_32.c
28138@@ -44,6 +44,7 @@
28139 #include <linux/ptrace.h>
28140 #include <linux/audit.h>
28141 #include <linux/stddef.h>
28142+#include <linux/grsecurity.h>
28143
28144 #include <asm/uaccess.h>
28145 #include <asm/io.h>
28146@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28147 do_exit(SIGSEGV);
28148 }
28149
28150- tss = &per_cpu(init_tss, get_cpu());
28151+ tss = init_tss + get_cpu();
28152 current->thread.sp0 = current->thread.saved_sp0;
28153 current->thread.sysenter_cs = __KERNEL_CS;
28154 load_sp0(tss, &current->thread);
28155@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28156
28157 if (tsk->thread.saved_sp0)
28158 return -EPERM;
28159+
28160+#ifdef CONFIG_GRKERNSEC_VM86
28161+ if (!capable(CAP_SYS_RAWIO)) {
28162+ gr_handle_vm86();
28163+ return -EPERM;
28164+ }
28165+#endif
28166+
28167 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28168 offsetof(struct kernel_vm86_struct, vm86plus) -
28169 sizeof(info.regs));
28170@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28171 int tmp;
28172 struct vm86plus_struct __user *v86;
28173
28174+#ifdef CONFIG_GRKERNSEC_VM86
28175+ if (!capable(CAP_SYS_RAWIO)) {
28176+ gr_handle_vm86();
28177+ return -EPERM;
28178+ }
28179+#endif
28180+
28181 tsk = current;
28182 switch (cmd) {
28183 case VM86_REQUEST_IRQ:
28184@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28185 tsk->thread.saved_fs = info->regs32->fs;
28186 tsk->thread.saved_gs = get_user_gs(info->regs32);
28187
28188- tss = &per_cpu(init_tss, get_cpu());
28189+ tss = init_tss + get_cpu();
28190 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28191 if (cpu_has_sep)
28192 tsk->thread.sysenter_cs = 0;
28193@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28194 goto cannot_handle;
28195 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28196 goto cannot_handle;
28197- intr_ptr = (unsigned long __user *) (i << 2);
28198+ intr_ptr = (__force unsigned long __user *) (i << 2);
28199 if (get_user(segoffs, intr_ptr))
28200 goto cannot_handle;
28201 if ((segoffs >> 16) == BIOSSEG)
28202diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28203index da6b35a..977e9cf 100644
28204--- a/arch/x86/kernel/vmlinux.lds.S
28205+++ b/arch/x86/kernel/vmlinux.lds.S
28206@@ -26,6 +26,13 @@
28207 #include <asm/page_types.h>
28208 #include <asm/cache.h>
28209 #include <asm/boot.h>
28210+#include <asm/segment.h>
28211+
28212+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28213+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28214+#else
28215+#define __KERNEL_TEXT_OFFSET 0
28216+#endif
28217
28218 #undef i386 /* in case the preprocessor is a 32bit one */
28219
28220@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28221
28222 PHDRS {
28223 text PT_LOAD FLAGS(5); /* R_E */
28224+#ifdef CONFIG_X86_32
28225+ module PT_LOAD FLAGS(5); /* R_E */
28226+#endif
28227+#ifdef CONFIG_XEN
28228+ rodata PT_LOAD FLAGS(5); /* R_E */
28229+#else
28230+ rodata PT_LOAD FLAGS(4); /* R__ */
28231+#endif
28232 data PT_LOAD FLAGS(6); /* RW_ */
28233-#ifdef CONFIG_X86_64
28234+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28235 #ifdef CONFIG_SMP
28236 percpu PT_LOAD FLAGS(6); /* RW_ */
28237 #endif
28238+ text.init PT_LOAD FLAGS(5); /* R_E */
28239+ text.exit PT_LOAD FLAGS(5); /* R_E */
28240 init PT_LOAD FLAGS(7); /* RWE */
28241-#endif
28242 note PT_NOTE FLAGS(0); /* ___ */
28243 }
28244
28245 SECTIONS
28246 {
28247 #ifdef CONFIG_X86_32
28248- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28249- phys_startup_32 = startup_32 - LOAD_OFFSET;
28250+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28251 #else
28252- . = __START_KERNEL;
28253- phys_startup_64 = startup_64 - LOAD_OFFSET;
28254+ . = __START_KERNEL;
28255 #endif
28256
28257 /* Text and read-only data */
28258- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28259- _text = .;
28260+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28261 /* bootstrapping code */
28262+#ifdef CONFIG_X86_32
28263+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28264+#else
28265+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28266+#endif
28267+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28268+ _text = .;
28269 HEAD_TEXT
28270 . = ALIGN(8);
28271 _stext = .;
28272@@ -104,13 +124,47 @@ SECTIONS
28273 IRQENTRY_TEXT
28274 *(.fixup)
28275 *(.gnu.warning)
28276- /* End of text section */
28277- _etext = .;
28278 } :text = 0x9090
28279
28280- NOTES :text :note
28281+ . += __KERNEL_TEXT_OFFSET;
28282
28283- EXCEPTION_TABLE(16) :text = 0x9090
28284+#ifdef CONFIG_X86_32
28285+ . = ALIGN(PAGE_SIZE);
28286+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28287+
28288+#ifdef CONFIG_PAX_KERNEXEC
28289+ MODULES_EXEC_VADDR = .;
28290+ BYTE(0)
28291+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28292+ . = ALIGN(HPAGE_SIZE) - 1;
28293+ MODULES_EXEC_END = .;
28294+#endif
28295+
28296+ } :module
28297+#endif
28298+
28299+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28300+ /* End of text section */
28301+ BYTE(0)
28302+ _etext = . - __KERNEL_TEXT_OFFSET;
28303+ }
28304+
28305+#ifdef CONFIG_X86_32
28306+ . = ALIGN(PAGE_SIZE);
28307+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28308+ . = ALIGN(PAGE_SIZE);
28309+ *(.empty_zero_page)
28310+ *(.initial_pg_fixmap)
28311+ *(.initial_pg_pmd)
28312+ *(.initial_page_table)
28313+ *(.swapper_pg_dir)
28314+ } :rodata
28315+#endif
28316+
28317+ . = ALIGN(PAGE_SIZE);
28318+ NOTES :rodata :note
28319+
28320+ EXCEPTION_TABLE(16) :rodata
28321
28322 #if defined(CONFIG_DEBUG_RODATA)
28323 /* .text should occupy whole number of pages */
28324@@ -122,16 +176,20 @@ SECTIONS
28325
28326 /* Data */
28327 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28328+
28329+#ifdef CONFIG_PAX_KERNEXEC
28330+ . = ALIGN(HPAGE_SIZE);
28331+#else
28332+ . = ALIGN(PAGE_SIZE);
28333+#endif
28334+
28335 /* Start of data section */
28336 _sdata = .;
28337
28338 /* init_task */
28339 INIT_TASK_DATA(THREAD_SIZE)
28340
28341-#ifdef CONFIG_X86_32
28342- /* 32 bit has nosave before _edata */
28343 NOSAVE_DATA
28344-#endif
28345
28346 PAGE_ALIGNED_DATA(PAGE_SIZE)
28347
28348@@ -172,12 +230,19 @@ SECTIONS
28349 #endif /* CONFIG_X86_64 */
28350
28351 /* Init code and data - will be freed after init */
28352- . = ALIGN(PAGE_SIZE);
28353 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28354+ BYTE(0)
28355+
28356+#ifdef CONFIG_PAX_KERNEXEC
28357+ . = ALIGN(HPAGE_SIZE);
28358+#else
28359+ . = ALIGN(PAGE_SIZE);
28360+#endif
28361+
28362 __init_begin = .; /* paired with __init_end */
28363- }
28364+ } :init.begin
28365
28366-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28367+#ifdef CONFIG_SMP
28368 /*
28369 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28370 * output PHDR, so the next output section - .init.text - should
28371@@ -186,12 +251,27 @@ SECTIONS
28372 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
28373 #endif
28374
28375- INIT_TEXT_SECTION(PAGE_SIZE)
28376-#ifdef CONFIG_X86_64
28377- :init
28378-#endif
28379+ . = ALIGN(PAGE_SIZE);
28380+ init_begin = .;
28381+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28382+ VMLINUX_SYMBOL(_sinittext) = .;
28383+ INIT_TEXT
28384+ VMLINUX_SYMBOL(_einittext) = .;
28385+ . = ALIGN(PAGE_SIZE);
28386+ } :text.init
28387
28388- INIT_DATA_SECTION(16)
28389+ /*
28390+ * .exit.text is discarded at runtime, not link time, to deal with
28391+ * references from .altinstructions and .eh_frame
28392+ */
28393+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28394+ EXIT_TEXT
28395+ . = ALIGN(16);
28396+ } :text.exit
28397+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28398+
28399+ . = ALIGN(PAGE_SIZE);
28400+ INIT_DATA_SECTION(16) :init
28401
28402 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28403 __x86_cpu_dev_start = .;
28404@@ -262,19 +342,12 @@ SECTIONS
28405 }
28406
28407 . = ALIGN(8);
28408- /*
28409- * .exit.text is discard at runtime, not link time, to deal with
28410- * references from .altinstructions and .eh_frame
28411- */
28412- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28413- EXIT_TEXT
28414- }
28415
28416 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28417 EXIT_DATA
28418 }
28419
28420-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28421+#ifndef CONFIG_SMP
28422 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28423 #endif
28424
28425@@ -293,16 +366,10 @@ SECTIONS
28426 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28427 __smp_locks = .;
28428 *(.smp_locks)
28429- . = ALIGN(PAGE_SIZE);
28430 __smp_locks_end = .;
28431+ . = ALIGN(PAGE_SIZE);
28432 }
28433
28434-#ifdef CONFIG_X86_64
28435- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28436- NOSAVE_DATA
28437- }
28438-#endif
28439-
28440 /* BSS */
28441 . = ALIGN(PAGE_SIZE);
28442 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28443@@ -318,6 +385,7 @@ SECTIONS
28444 __brk_base = .;
28445 . += 64 * 1024; /* 64k alignment slop space */
28446 *(.brk_reservation) /* areas brk users have reserved */
28447+ . = ALIGN(HPAGE_SIZE);
28448 __brk_limit = .;
28449 }
28450
28451@@ -344,13 +412,12 @@ SECTIONS
28452 * for the boot processor.
28453 */
28454 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28455-INIT_PER_CPU(gdt_page);
28456 INIT_PER_CPU(irq_stack_union);
28457
28458 /*
28459 * Build-time check on the image size:
28460 */
28461-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28462+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28463 "kernel image bigger than KERNEL_IMAGE_SIZE");
28464
28465 #ifdef CONFIG_SMP
28466diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28467index 1f96f93..6f29be7 100644
28468--- a/arch/x86/kernel/vsyscall_64.c
28469+++ b/arch/x86/kernel/vsyscall_64.c
28470@@ -56,15 +56,13 @@
28471 DEFINE_VVAR(int, vgetcpu_mode);
28472 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
28473
28474-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28475+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28476
28477 static int __init vsyscall_setup(char *str)
28478 {
28479 if (str) {
28480 if (!strcmp("emulate", str))
28481 vsyscall_mode = EMULATE;
28482- else if (!strcmp("native", str))
28483- vsyscall_mode = NATIVE;
28484 else if (!strcmp("none", str))
28485 vsyscall_mode = NONE;
28486 else
28487@@ -101,7 +99,7 @@ void update_vsyscall(struct timekeeper *tk)
28488 vdata->monotonic_time_sec = tk->xtime_sec
28489 + tk->wall_to_monotonic.tv_sec;
28490 vdata->monotonic_time_snsec = tk->xtime_nsec
28491- + (tk->wall_to_monotonic.tv_nsec
28492+ + ((u64)tk->wall_to_monotonic.tv_nsec
28493 << tk->shift);
28494 while (vdata->monotonic_time_snsec >=
28495 (((u64)NSEC_PER_SEC) << tk->shift)) {
28496@@ -323,8 +321,7 @@ do_ret:
28497 return true;
28498
28499 sigsegv:
28500- force_sig(SIGSEGV, current);
28501- return true;
28502+ do_group_exit(SIGKILL);
28503 }
28504
28505 /*
28506@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
28507 extern char __vvar_page;
28508 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
28509
28510- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
28511- vsyscall_mode == NATIVE
28512- ? PAGE_KERNEL_VSYSCALL
28513- : PAGE_KERNEL_VVAR);
28514+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28515 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
28516 (unsigned long)VSYSCALL_START);
28517
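
Besides removing the NATIVE vsyscall mode, the update_vsyscall() hunk above widens tv_nsec to u64 before << tk->shift: on 32-bit builds, where long is 32 bits, the shift would otherwise be performed in the narrow type and the high bits silently discarded before the addition. A demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t tv_nsec = 999999999u;  /* models a 32-bit tv_nsec */
        unsigned shift = 10;

        uint64_t bad  = (uint32_t)(tv_nsec << shift);  /* truncated, then widened */
        uint64_t good = (uint64_t)tv_nsec << shift;    /* widened, then shifted */

        printf("bad=%llu good=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}
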
28518diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28519index 04068192..4d75aa6 100644
28520--- a/arch/x86/kernel/x8664_ksyms_64.c
28521+++ b/arch/x86/kernel/x8664_ksyms_64.c
28522@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28523 EXPORT_SYMBOL(copy_user_generic_unrolled);
28524 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28525 EXPORT_SYMBOL(__copy_user_nocache);
28526-EXPORT_SYMBOL(_copy_from_user);
28527-EXPORT_SYMBOL(_copy_to_user);
28528
28529 EXPORT_SYMBOL(copy_page);
28530 EXPORT_SYMBOL(clear_page);
28531@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28532 EXPORT_SYMBOL(___preempt_schedule_context);
28533 #endif
28534 #endif
28535+
28536+#ifdef CONFIG_PAX_PER_CPU_PGD
28537+EXPORT_SYMBOL(cpu_pgd);
28538+#endif
28539diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28540index e48b674..a451dd9 100644
28541--- a/arch/x86/kernel/x86_init.c
28542+++ b/arch/x86/kernel/x86_init.c
28543@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28544 static void default_nmi_init(void) { };
28545 static int default_i8042_detect(void) { return 1; };
28546
28547-struct x86_platform_ops x86_platform = {
28548+struct x86_platform_ops x86_platform __read_only = {
28549 .calibrate_tsc = native_calibrate_tsc,
28550 .get_wallclock = mach_get_cmos_time,
28551 .set_wallclock = mach_set_rtc_mmss,
28552@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28553 EXPORT_SYMBOL_GPL(x86_platform);
28554
28555 #if defined(CONFIG_PCI_MSI)
28556-struct x86_msi_ops x86_msi = {
28557+struct x86_msi_ops x86_msi __read_only = {
28558 .setup_msi_irqs = native_setup_msi_irqs,
28559 .compose_msi_msg = native_compose_msi_msg,
28560 .teardown_msi_irq = native_teardown_msi_irq,
28561@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
28562 }
28563 #endif
28564
28565-struct x86_io_apic_ops x86_io_apic_ops = {
28566+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28567 .init = native_io_apic_init_mappings,
28568 .read = native_io_apic_read,
28569 .write = native_io_apic_write,
28570diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28571index a4b451c..8dfe1ad 100644
28572--- a/arch/x86/kernel/xsave.c
28573+++ b/arch/x86/kernel/xsave.c
28574@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28575
28576 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28577 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28578- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28579+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28580
28581 if (!use_xsave())
28582 return err;
28583
28584- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28585+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28586
28587 /*
28588 * Read the xstate_bv which we copied (directly from the cpu or
28589 * from the state in task struct) to the user buffers.
28590 */
28591- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28592+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28593
28594 /*
28595 * For legacy compatible, we always set FP/SSE bits in the bit
28596@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28597 */
28598 xstate_bv |= XSTATE_FPSSE;
28599
28600- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28601+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28602
28603 return err;
28604 }
28605@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28606 {
28607 int err;
28608
28609+ buf = (struct xsave_struct __user *)____m(buf);
28610 if (use_xsave())
28611 err = xsave_user(buf);
28612 else if (use_fxsr())
28613@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28614 */
28615 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28616 {
28617+ buf = (void __user *)____m(buf);
28618 if (use_xsave()) {
28619 if ((unsigned long)buf % 64 || fx_only) {
28620 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
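
The xsave.c changes are sparse/UDEREF hygiene: pointers handed to __put_user()/__get_user() gain explicit __user qualifiers, and save_user_xstate()/restore_user_xstate() pass their buffer through ____m(), the PaX helper that rebases a userland pointer into the kernel's shadow mapping of userland when CONFIG_PAX_MEMORY_UDEREF is enabled (and degenerates to a no-op cast otherwise); see the C model of the rebasing after the getuser.S hunk near the end of this section.
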
28621diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28622index c697625..a032162 100644
28623--- a/arch/x86/kvm/cpuid.c
28624+++ b/arch/x86/kvm/cpuid.c
28625@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28626 struct kvm_cpuid2 *cpuid,
28627 struct kvm_cpuid_entry2 __user *entries)
28628 {
28629- int r;
28630+ int r, i;
28631
28632 r = -E2BIG;
28633 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28634 goto out;
28635 r = -EFAULT;
28636- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28637- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28638+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28639 goto out;
28640+ for (i = 0; i < cpuid->nent; ++i) {
28641+ struct kvm_cpuid_entry2 cpuid_entry;
28642+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28643+ goto out;
28644+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28645+ }
28646 vcpu->arch.cpuid_nent = cpuid->nent;
28647 kvm_apic_set_version(vcpu);
28648 kvm_x86_ops->cpuid_update(vcpu);
28649@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28650 struct kvm_cpuid2 *cpuid,
28651 struct kvm_cpuid_entry2 __user *entries)
28652 {
28653- int r;
28654+ int r, i;
28655
28656 r = -E2BIG;
28657 if (cpuid->nent < vcpu->arch.cpuid_nent)
28658 goto out;
28659 r = -EFAULT;
28660- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28661- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28662+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28663 goto out;
28664+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28665+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28666+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28667+ goto out;
28668+ }
28669 return 0;
28670
28671 out:
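
In both cpuid ioctls above, the single variable-length copy_from_user()/copy_to_user() is replaced by an access_ok() check plus a per-entry __copy_from_user()/__copy_to_user() loop, so each individual copy has a compile-time-constant size (plausibly to satisfy PaX's constant-size copy checking) while the whole user range is still validated up front. The shape of the read side as a standalone sketch, error handling condensed:

	if (!access_ok(VERIFY_READ, entries,
		       cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;			/* validate the whole user range once */
	for (i = 0; i < cpuid->nent; ++i) {
		struct kvm_cpuid_entry2 e;
		if (__copy_from_user(&e, entries + i, sizeof(e)))	/* constant size */
			return -EFAULT;
		vcpu->arch.cpuid_entries[i] = e;
	}
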
28672diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28673index 9736529..ab4f54c 100644
28674--- a/arch/x86/kvm/lapic.c
28675+++ b/arch/x86/kvm/lapic.c
28676@@ -55,7 +55,7 @@
28677 #define APIC_BUS_CYCLE_NS 1
28678
28679 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28680-#define apic_debug(fmt, arg...)
28681+#define apic_debug(fmt, arg...) do {} while (0)
28682
28683 #define APIC_LVT_NUM 6
28684 /* 14 is the version for Xeon and Pentium 8.4.8*/
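
Replacing the empty apic_debug() expansion with do {} while (0) is the standard way to make a disabled debug macro behave as a single statement: the bare empty expansion leaves a null statement behind, which trips gcc's -Wempty-body and reads badly in if/else chains. For example:

	#define apic_debug(fmt, arg...)			/* old: expands to nothing */

	if (failed)
		apic_debug("lapic init failed\n");	/* becomes a bare ';' body */
	else
		recover();

	#define apic_debug(fmt, arg...) do {} while (0)	/* new: a real statement */
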
28685diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28686index cba218a..1cc1bed 100644
28687--- a/arch/x86/kvm/paging_tmpl.h
28688+++ b/arch/x86/kvm/paging_tmpl.h
28689@@ -331,7 +331,7 @@ retry_walk:
28690 if (unlikely(kvm_is_error_hva(host_addr)))
28691 goto error;
28692
28693- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28694+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28695 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28696 goto error;
28697 walker->ptep_user[walker->level - 1] = ptep_user;
28698diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28699index 2de1bc0..22251ee 100644
28700--- a/arch/x86/kvm/svm.c
28701+++ b/arch/x86/kvm/svm.c
28702@@ -3508,7 +3508,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28703 int cpu = raw_smp_processor_id();
28704
28705 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28706+
28707+ pax_open_kernel();
28708 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28709+ pax_close_kernel();
28710+
28711 load_TR_desc();
28712 }
28713
28714@@ -3911,6 +3915,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28715 #endif
28716 #endif
28717
28718+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28719+ __set_fs(current_thread_info()->addr_limit);
28720+#endif
28721+
28722 reload_tss(vcpu);
28723
28724 local_irq_disable();
28725diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28726index 3927528..fc19971 100644
28727--- a/arch/x86/kvm/vmx.c
28728+++ b/arch/x86/kvm/vmx.c
28729@@ -1320,12 +1320,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28730 #endif
28731 }
28732
28733-static void vmcs_clear_bits(unsigned long field, u32 mask)
28734+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28735 {
28736 vmcs_writel(field, vmcs_readl(field) & ~mask);
28737 }
28738
28739-static void vmcs_set_bits(unsigned long field, u32 mask)
28740+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28741 {
28742 vmcs_writel(field, vmcs_readl(field) | mask);
28743 }
28744@@ -1585,7 +1585,11 @@ static void reload_tss(void)
28745 struct desc_struct *descs;
28746
28747 descs = (void *)gdt->address;
28748+
28749+ pax_open_kernel();
28750 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28751+ pax_close_kernel();
28752+
28753 load_TR_desc();
28754 }
28755
28756@@ -1809,6 +1813,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28757 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28758 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28759
28760+#ifdef CONFIG_PAX_PER_CPU_PGD
28761+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28762+#endif
28763+
28764 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28765 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28766 vmx->loaded_vmcs->cpu = cpu;
28767@@ -2098,7 +2106,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28768 * reads and returns guest's timestamp counter "register"
28769 * guest_tsc = host_tsc + tsc_offset -- 21.3
28770 */
28771-static u64 guest_read_tsc(void)
28772+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28773 {
28774 u64 host_tsc, tsc_offset;
28775
28776@@ -3024,8 +3032,11 @@ static __init int hardware_setup(void)
28777 if (!cpu_has_vmx_flexpriority())
28778 flexpriority_enabled = 0;
28779
28780- if (!cpu_has_vmx_tpr_shadow())
28781- kvm_x86_ops->update_cr8_intercept = NULL;
28782+ if (!cpu_has_vmx_tpr_shadow()) {
28783+ pax_open_kernel();
28784+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28785+ pax_close_kernel();
28786+ }
28787
28788 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28789 kvm_disable_largepages();
28790@@ -3036,13 +3047,15 @@ static __init int hardware_setup(void)
28791 if (!cpu_has_vmx_apicv())
28792 enable_apicv = 0;
28793
28794+ pax_open_kernel();
28795 if (enable_apicv)
28796- kvm_x86_ops->update_cr8_intercept = NULL;
28797+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28798 else {
28799- kvm_x86_ops->hwapic_irr_update = NULL;
28800- kvm_x86_ops->deliver_posted_interrupt = NULL;
28801- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28802+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28803+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28804+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28805 }
28806+ pax_close_kernel();
28807
28808 if (nested)
28809 nested_vmx_setup_ctls_msrs();
28810@@ -4165,7 +4178,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28811
28812 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28813 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28814+
28815+#ifndef CONFIG_PAX_PER_CPU_PGD
28816 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28817+#endif
28818
28819 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28820 #ifdef CONFIG_X86_64
28821@@ -4187,7 +4203,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28822 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28823 vmx->host_idt_base = dt.address;
28824
28825- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28826+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28827
28828 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28829 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28830@@ -7265,6 +7281,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28831 "jmp 2f \n\t"
28832 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28833 "2: "
28834+
28835+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28836+ "ljmp %[cs],$3f\n\t"
28837+ "3: "
28838+#endif
28839+
28840 /* Save guest registers, load host registers, keep flags */
28841 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28842 "pop %0 \n\t"
28843@@ -7317,6 +7339,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28844 #endif
28845 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28846 [wordsize]"i"(sizeof(ulong))
28847+
28848+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28849+ ,[cs]"i"(__KERNEL_CS)
28850+#endif
28851+
28852 : "cc", "memory"
28853 #ifdef CONFIG_X86_64
28854 , "rax", "rbx", "rdi", "rsi"
28855@@ -7330,7 +7357,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28856 if (debugctlmsr)
28857 update_debugctlmsr(debugctlmsr);
28858
28859-#ifndef CONFIG_X86_64
28860+#ifdef CONFIG_X86_32
28861 /*
28862 * The sysexit path does not restore ds/es, so we must set them to
28863 * a reasonable value ourselves.
28864@@ -7339,8 +7366,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28865 * may be executed in interrupt context, which saves and restore segments
28866 * around it, nullifying its effect.
28867 */
28868- loadsegment(ds, __USER_DS);
28869- loadsegment(es, __USER_DS);
28870+ loadsegment(ds, __KERNEL_DS);
28871+ loadsegment(es, __KERNEL_DS);
28872+ loadsegment(ss, __KERNEL_DS);
28873+
28874+#ifdef CONFIG_PAX_KERNEXEC
28875+ loadsegment(fs, __KERNEL_PERCPU);
28876+#endif
28877+
28878+#ifdef CONFIG_PAX_MEMORY_UDEREF
28879+ __set_fs(current_thread_info()->addr_limit);
28880+#endif
28881+
28882 #endif
28883
28884 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
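
The vmx.c hunks collect several of the patch's recurring themes: writes to the nominally read-only kvm_x86_ops table are funnelled through pax_open_kernel()/pax_close_kernel() with *(void **) casts (the pattern sketched after the x86_init.c diff above); under CONFIG_PAX_PER_CPU_PGD the host CR3 is written on every vmx_vcpu_load() instead of once in the constant host state, since each CPU now has its own kernel page directory (exported as cpu_pgd in the x8664_ksyms_64.c hunk); and on 32-bit the post-VM-exit path reloads %cs via ljmp under KERNEXEC, reloads the data and stack segments with __KERNEL_DS, and restores the UDEREF addr_limit, mirroring the svm.c hunk.
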
28885diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28886index ee0c3b5..773bb94 100644
28887--- a/arch/x86/kvm/x86.c
28888+++ b/arch/x86/kvm/x86.c
28889@@ -1776,8 +1776,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28890 {
28891 struct kvm *kvm = vcpu->kvm;
28892 int lm = is_long_mode(vcpu);
28893- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28894- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28895+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28896+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28897 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28898 : kvm->arch.xen_hvm_config.blob_size_32;
28899 u32 page_num = data & ~PAGE_MASK;
28900@@ -2688,6 +2688,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28901 if (n < msr_list.nmsrs)
28902 goto out;
28903 r = -EFAULT;
28904+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28905+ goto out;
28906 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28907 num_msrs_to_save * sizeof(u32)))
28908 goto out;
28909@@ -5502,7 +5504,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28910 };
28911 #endif
28912
28913-int kvm_arch_init(void *opaque)
28914+int kvm_arch_init(const void *opaque)
28915 {
28916 int r;
28917 struct kvm_x86_ops *ops = opaque;
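
The kvm_arch_dev_ioctl() hunk adds a defensive clamp: num_msrs_to_save is refused as a copy length if it ever exceeds the backing array, so the count and the array cannot get out of sync before the copy_to_user(). The idiom, reduced to its core:

	if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
		return -EFAULT;		/* count must never exceed the backing array */
	if (copy_to_user(user_msr_list->indices, &msrs_to_save,
			 num_msrs_to_save * sizeof(u32)))
		return -EFAULT;
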
28918diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28919index ad1fb5f..fe30b66 100644
28920--- a/arch/x86/lguest/boot.c
28921+++ b/arch/x86/lguest/boot.c
28922@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28923 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28924 * Launcher to reboot us.
28925 */
28926-static void lguest_restart(char *reason)
28927+static __noreturn void lguest_restart(char *reason)
28928 {
28929 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28930+ BUG();
28931 }
28932
28933 /*G:050
28934diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28935index 00933d5..3a64af9 100644
28936--- a/arch/x86/lib/atomic64_386_32.S
28937+++ b/arch/x86/lib/atomic64_386_32.S
28938@@ -48,6 +48,10 @@ BEGIN(read)
28939 movl (v), %eax
28940 movl 4(v), %edx
28941 RET_ENDP
28942+BEGIN(read_unchecked)
28943+ movl (v), %eax
28944+ movl 4(v), %edx
28945+RET_ENDP
28946 #undef v
28947
28948 #define v %esi
28949@@ -55,6 +59,10 @@ BEGIN(set)
28950 movl %ebx, (v)
28951 movl %ecx, 4(v)
28952 RET_ENDP
28953+BEGIN(set_unchecked)
28954+ movl %ebx, (v)
28955+ movl %ecx, 4(v)
28956+RET_ENDP
28957 #undef v
28958
28959 #define v %esi
28960@@ -70,6 +78,20 @@ RET_ENDP
28961 BEGIN(add)
28962 addl %eax, (v)
28963 adcl %edx, 4(v)
28964+
28965+#ifdef CONFIG_PAX_REFCOUNT
28966+ jno 0f
28967+ subl %eax, (v)
28968+ sbbl %edx, 4(v)
28969+ int $4
28970+0:
28971+ _ASM_EXTABLE(0b, 0b)
28972+#endif
28973+
28974+RET_ENDP
28975+BEGIN(add_unchecked)
28976+ addl %eax, (v)
28977+ adcl %edx, 4(v)
28978 RET_ENDP
28979 #undef v
28980
28981@@ -77,6 +99,24 @@ RET_ENDP
28982 BEGIN(add_return)
28983 addl (v), %eax
28984 adcl 4(v), %edx
28985+
28986+#ifdef CONFIG_PAX_REFCOUNT
28987+ into
28988+1234:
28989+ _ASM_EXTABLE(1234b, 2f)
28990+#endif
28991+
28992+ movl %eax, (v)
28993+ movl %edx, 4(v)
28994+
28995+#ifdef CONFIG_PAX_REFCOUNT
28996+2:
28997+#endif
28998+
28999+RET_ENDP
29000+BEGIN(add_return_unchecked)
29001+ addl (v), %eax
29002+ adcl 4(v), %edx
29003 movl %eax, (v)
29004 movl %edx, 4(v)
29005 RET_ENDP
29006@@ -86,6 +126,20 @@ RET_ENDP
29007 BEGIN(sub)
29008 subl %eax, (v)
29009 sbbl %edx, 4(v)
29010+
29011+#ifdef CONFIG_PAX_REFCOUNT
29012+ jno 0f
29013+ addl %eax, (v)
29014+ adcl %edx, 4(v)
29015+ int $4
29016+0:
29017+ _ASM_EXTABLE(0b, 0b)
29018+#endif
29019+
29020+RET_ENDP
29021+BEGIN(sub_unchecked)
29022+ subl %eax, (v)
29023+ sbbl %edx, 4(v)
29024 RET_ENDP
29025 #undef v
29026
29027@@ -96,6 +150,27 @@ BEGIN(sub_return)
29028 sbbl $0, %edx
29029 addl (v), %eax
29030 adcl 4(v), %edx
29031+
29032+#ifdef CONFIG_PAX_REFCOUNT
29033+ into
29034+1234:
29035+ _ASM_EXTABLE(1234b, 2f)
29036+#endif
29037+
29038+ movl %eax, (v)
29039+ movl %edx, 4(v)
29040+
29041+#ifdef CONFIG_PAX_REFCOUNT
29042+2:
29043+#endif
29044+
29045+RET_ENDP
29046+BEGIN(sub_return_unchecked)
29047+ negl %edx
29048+ negl %eax
29049+ sbbl $0, %edx
29050+ addl (v), %eax
29051+ adcl 4(v), %edx
29052 movl %eax, (v)
29053 movl %edx, 4(v)
29054 RET_ENDP
29055@@ -105,6 +180,20 @@ RET_ENDP
29056 BEGIN(inc)
29057 addl $1, (v)
29058 adcl $0, 4(v)
29059+
29060+#ifdef CONFIG_PAX_REFCOUNT
29061+ jno 0f
29062+ subl $1, (v)
29063+ sbbl $0, 4(v)
29064+ int $4
29065+0:
29066+ _ASM_EXTABLE(0b, 0b)
29067+#endif
29068+
29069+RET_ENDP
29070+BEGIN(inc_unchecked)
29071+ addl $1, (v)
29072+ adcl $0, 4(v)
29073 RET_ENDP
29074 #undef v
29075
29076@@ -114,6 +203,26 @@ BEGIN(inc_return)
29077 movl 4(v), %edx
29078 addl $1, %eax
29079 adcl $0, %edx
29080+
29081+#ifdef CONFIG_PAX_REFCOUNT
29082+ into
29083+1234:
29084+ _ASM_EXTABLE(1234b, 2f)
29085+#endif
29086+
29087+ movl %eax, (v)
29088+ movl %edx, 4(v)
29089+
29090+#ifdef CONFIG_PAX_REFCOUNT
29091+2:
29092+#endif
29093+
29094+RET_ENDP
29095+BEGIN(inc_return_unchecked)
29096+ movl (v), %eax
29097+ movl 4(v), %edx
29098+ addl $1, %eax
29099+ adcl $0, %edx
29100 movl %eax, (v)
29101 movl %edx, 4(v)
29102 RET_ENDP
29103@@ -123,6 +232,20 @@ RET_ENDP
29104 BEGIN(dec)
29105 subl $1, (v)
29106 sbbl $0, 4(v)
29107+
29108+#ifdef CONFIG_PAX_REFCOUNT
29109+ jno 0f
29110+ addl $1, (v)
29111+ adcl $0, 4(v)
29112+ int $4
29113+0:
29114+ _ASM_EXTABLE(0b, 0b)
29115+#endif
29116+
29117+RET_ENDP
29118+BEGIN(dec_unchecked)
29119+ subl $1, (v)
29120+ sbbl $0, 4(v)
29121 RET_ENDP
29122 #undef v
29123
29124@@ -132,6 +255,26 @@ BEGIN(dec_return)
29125 movl 4(v), %edx
29126 subl $1, %eax
29127 sbbl $0, %edx
29128+
29129+#ifdef CONFIG_PAX_REFCOUNT
29130+ into
29131+1234:
29132+ _ASM_EXTABLE(1234b, 2f)
29133+#endif
29134+
29135+ movl %eax, (v)
29136+ movl %edx, 4(v)
29137+
29138+#ifdef CONFIG_PAX_REFCOUNT
29139+2:
29140+#endif
29141+
29142+RET_ENDP
29143+BEGIN(dec_return_unchecked)
29144+ movl (v), %eax
29145+ movl 4(v), %edx
29146+ subl $1, %eax
29147+ sbbl $0, %edx
29148 movl %eax, (v)
29149 movl %edx, 4(v)
29150 RET_ENDP
29151@@ -143,6 +286,13 @@ BEGIN(add_unless)
29152 adcl %edx, %edi
29153 addl (v), %eax
29154 adcl 4(v), %edx
29155+
29156+#ifdef CONFIG_PAX_REFCOUNT
29157+ into
29158+1234:
29159+ _ASM_EXTABLE(1234b, 2f)
29160+#endif
29161+
29162 cmpl %eax, %ecx
29163 je 3f
29164 1:
29165@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29166 1:
29167 addl $1, %eax
29168 adcl $0, %edx
29169+
29170+#ifdef CONFIG_PAX_REFCOUNT
29171+ into
29172+1234:
29173+ _ASM_EXTABLE(1234b, 2f)
29174+#endif
29175+
29176 movl %eax, (v)
29177 movl %edx, 4(v)
29178 movl $1, %eax
29179@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29180 movl 4(v), %edx
29181 subl $1, %eax
29182 sbbl $0, %edx
29183+
29184+#ifdef CONFIG_PAX_REFCOUNT
29185+ into
29186+1234:
29187+ _ASM_EXTABLE(1234b, 1f)
29188+#endif
29189+
29190 js 1f
29191 movl %eax, (v)
29192 movl %edx, 4(v)
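
Throughout atomic64_386_32.S the pattern is uniform: under CONFIG_PAX_REFCOUNT every checked operation tests the overflow flag right after the arithmetic (jno over a fixup, or into plus an exception-table entry), undoes the update where needed and raises #OF via int $4, while a new *_unchecked twin keeps the old wrapping semantics for counters that are allowed to overflow. At the call-site level the split looks roughly like this (a sketch of intent, not of the asm entry points):

	atomic64_inc(&obj->refcount);		/* checked: overflow traps instead of wrapping */
	atomic64_inc_unchecked(&stats.events);	/* unchecked: ordinary modular arithmetic */
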
29193diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29194index f5cc9eb..51fa319 100644
29195--- a/arch/x86/lib/atomic64_cx8_32.S
29196+++ b/arch/x86/lib/atomic64_cx8_32.S
29197@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29198 CFI_STARTPROC
29199
29200 read64 %ecx
29201+ pax_force_retaddr
29202 ret
29203 CFI_ENDPROC
29204 ENDPROC(atomic64_read_cx8)
29205
29206+ENTRY(atomic64_read_unchecked_cx8)
29207+ CFI_STARTPROC
29208+
29209+ read64 %ecx
29210+ pax_force_retaddr
29211+ ret
29212+ CFI_ENDPROC
29213+ENDPROC(atomic64_read_unchecked_cx8)
29214+
29215 ENTRY(atomic64_set_cx8)
29216 CFI_STARTPROC
29217
29218@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29219 cmpxchg8b (%esi)
29220 jne 1b
29221
29222+ pax_force_retaddr
29223 ret
29224 CFI_ENDPROC
29225 ENDPROC(atomic64_set_cx8)
29226
29227+ENTRY(atomic64_set_unchecked_cx8)
29228+ CFI_STARTPROC
29229+
29230+1:
29231+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29232+ * are atomic on 586 and newer */
29233+ cmpxchg8b (%esi)
29234+ jne 1b
29235+
29236+ pax_force_retaddr
29237+ ret
29238+ CFI_ENDPROC
29239+ENDPROC(atomic64_set_unchecked_cx8)
29240+
29241 ENTRY(atomic64_xchg_cx8)
29242 CFI_STARTPROC
29243
29244@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29245 cmpxchg8b (%esi)
29246 jne 1b
29247
29248+ pax_force_retaddr
29249 ret
29250 CFI_ENDPROC
29251 ENDPROC(atomic64_xchg_cx8)
29252
29253-.macro addsub_return func ins insc
29254-ENTRY(atomic64_\func\()_return_cx8)
29255+.macro addsub_return func ins insc unchecked=""
29256+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29257 CFI_STARTPROC
29258 SAVE ebp
29259 SAVE ebx
29260@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29261 movl %edx, %ecx
29262 \ins\()l %esi, %ebx
29263 \insc\()l %edi, %ecx
29264+
29265+.ifb \unchecked
29266+#ifdef CONFIG_PAX_REFCOUNT
29267+ into
29268+2:
29269+ _ASM_EXTABLE(2b, 3f)
29270+#endif
29271+.endif
29272+
29273 LOCK_PREFIX
29274 cmpxchg8b (%ebp)
29275 jne 1b
29276-
29277-10:
29278 movl %ebx, %eax
29279 movl %ecx, %edx
29280+
29281+.ifb \unchecked
29282+#ifdef CONFIG_PAX_REFCOUNT
29283+3:
29284+#endif
29285+.endif
29286+
29287 RESTORE edi
29288 RESTORE esi
29289 RESTORE ebx
29290 RESTORE ebp
29291+ pax_force_retaddr
29292 ret
29293 CFI_ENDPROC
29294-ENDPROC(atomic64_\func\()_return_cx8)
29295+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29296 .endm
29297
29298 addsub_return add add adc
29299 addsub_return sub sub sbb
29300+addsub_return add add adc _unchecked
29301+addsub_return sub sub sbb _unchecked
29302
29303-.macro incdec_return func ins insc
29304-ENTRY(atomic64_\func\()_return_cx8)
29305+.macro incdec_return func ins insc unchecked=""
29306+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29307 CFI_STARTPROC
29308 SAVE ebx
29309
29310@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29311 movl %edx, %ecx
29312 \ins\()l $1, %ebx
29313 \insc\()l $0, %ecx
29314+
29315+.ifb \unchecked
29316+#ifdef CONFIG_PAX_REFCOUNT
29317+ into
29318+2:
29319+ _ASM_EXTABLE(2b, 3f)
29320+#endif
29321+.endif
29322+
29323 LOCK_PREFIX
29324 cmpxchg8b (%esi)
29325 jne 1b
29326
29327-10:
29328 movl %ebx, %eax
29329 movl %ecx, %edx
29330+
29331+.ifb \unchecked
29332+#ifdef CONFIG_PAX_REFCOUNT
29333+3:
29334+#endif
29335+.endif
29336+
29337 RESTORE ebx
29338+ pax_force_retaddr
29339 ret
29340 CFI_ENDPROC
29341-ENDPROC(atomic64_\func\()_return_cx8)
29342+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29343 .endm
29344
29345 incdec_return inc add adc
29346 incdec_return dec sub sbb
29347+incdec_return inc add adc _unchecked
29348+incdec_return dec sub sbb _unchecked
29349
29350 ENTRY(atomic64_dec_if_positive_cx8)
29351 CFI_STARTPROC
29352@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29353 movl %edx, %ecx
29354 subl $1, %ebx
29355 sbb $0, %ecx
29356+
29357+#ifdef CONFIG_PAX_REFCOUNT
29358+ into
29359+1234:
29360+ _ASM_EXTABLE(1234b, 2f)
29361+#endif
29362+
29363 js 2f
29364 LOCK_PREFIX
29365 cmpxchg8b (%esi)
29366@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29367 movl %ebx, %eax
29368 movl %ecx, %edx
29369 RESTORE ebx
29370+ pax_force_retaddr
29371 ret
29372 CFI_ENDPROC
29373 ENDPROC(atomic64_dec_if_positive_cx8)
29374@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29375 movl %edx, %ecx
29376 addl %ebp, %ebx
29377 adcl %edi, %ecx
29378+
29379+#ifdef CONFIG_PAX_REFCOUNT
29380+ into
29381+1234:
29382+ _ASM_EXTABLE(1234b, 3f)
29383+#endif
29384+
29385 LOCK_PREFIX
29386 cmpxchg8b (%esi)
29387 jne 1b
29388@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29389 CFI_ADJUST_CFA_OFFSET -8
29390 RESTORE ebx
29391 RESTORE ebp
29392+ pax_force_retaddr
29393 ret
29394 4:
29395 cmpl %edx, 4(%esp)
29396@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29397 xorl %ecx, %ecx
29398 addl $1, %ebx
29399 adcl %edx, %ecx
29400+
29401+#ifdef CONFIG_PAX_REFCOUNT
29402+ into
29403+1234:
29404+ _ASM_EXTABLE(1234b, 3f)
29405+#endif
29406+
29407 LOCK_PREFIX
29408 cmpxchg8b (%esi)
29409 jne 1b
29410@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29411 movl $1, %eax
29412 3:
29413 RESTORE ebx
29414+ pax_force_retaddr
29415 ret
29416 CFI_ENDPROC
29417 ENDPROC(atomic64_inc_not_zero_cx8)
29418diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29419index e78b8ee..7e173a8 100644
29420--- a/arch/x86/lib/checksum_32.S
29421+++ b/arch/x86/lib/checksum_32.S
29422@@ -29,7 +29,8 @@
29423 #include <asm/dwarf2.h>
29424 #include <asm/errno.h>
29425 #include <asm/asm.h>
29426-
29427+#include <asm/segment.h>
29428+
29429 /*
29430 * computes a partial checksum, e.g. for TCP/UDP fragments
29431 */
29432@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29433
29434 #define ARGBASE 16
29435 #define FP 12
29436-
29437-ENTRY(csum_partial_copy_generic)
29438+
29439+ENTRY(csum_partial_copy_generic_to_user)
29440 CFI_STARTPROC
29441+
29442+#ifdef CONFIG_PAX_MEMORY_UDEREF
29443+ pushl_cfi %gs
29444+ popl_cfi %es
29445+ jmp csum_partial_copy_generic
29446+#endif
29447+
29448+ENTRY(csum_partial_copy_generic_from_user)
29449+
29450+#ifdef CONFIG_PAX_MEMORY_UDEREF
29451+ pushl_cfi %gs
29452+ popl_cfi %ds
29453+#endif
29454+
29455+ENTRY(csum_partial_copy_generic)
29456 subl $4,%esp
29457 CFI_ADJUST_CFA_OFFSET 4
29458 pushl_cfi %edi
29459@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29460 jmp 4f
29461 SRC(1: movw (%esi), %bx )
29462 addl $2, %esi
29463-DST( movw %bx, (%edi) )
29464+DST( movw %bx, %es:(%edi) )
29465 addl $2, %edi
29466 addw %bx, %ax
29467 adcl $0, %eax
29468@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29469 SRC(1: movl (%esi), %ebx )
29470 SRC( movl 4(%esi), %edx )
29471 adcl %ebx, %eax
29472-DST( movl %ebx, (%edi) )
29473+DST( movl %ebx, %es:(%edi) )
29474 adcl %edx, %eax
29475-DST( movl %edx, 4(%edi) )
29476+DST( movl %edx, %es:4(%edi) )
29477
29478 SRC( movl 8(%esi), %ebx )
29479 SRC( movl 12(%esi), %edx )
29480 adcl %ebx, %eax
29481-DST( movl %ebx, 8(%edi) )
29482+DST( movl %ebx, %es:8(%edi) )
29483 adcl %edx, %eax
29484-DST( movl %edx, 12(%edi) )
29485+DST( movl %edx, %es:12(%edi) )
29486
29487 SRC( movl 16(%esi), %ebx )
29488 SRC( movl 20(%esi), %edx )
29489 adcl %ebx, %eax
29490-DST( movl %ebx, 16(%edi) )
29491+DST( movl %ebx, %es:16(%edi) )
29492 adcl %edx, %eax
29493-DST( movl %edx, 20(%edi) )
29494+DST( movl %edx, %es:20(%edi) )
29495
29496 SRC( movl 24(%esi), %ebx )
29497 SRC( movl 28(%esi), %edx )
29498 adcl %ebx, %eax
29499-DST( movl %ebx, 24(%edi) )
29500+DST( movl %ebx, %es:24(%edi) )
29501 adcl %edx, %eax
29502-DST( movl %edx, 28(%edi) )
29503+DST( movl %edx, %es:28(%edi) )
29504
29505 lea 32(%esi), %esi
29506 lea 32(%edi), %edi
29507@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29508 shrl $2, %edx # This clears CF
29509 SRC(3: movl (%esi), %ebx )
29510 adcl %ebx, %eax
29511-DST( movl %ebx, (%edi) )
29512+DST( movl %ebx, %es:(%edi) )
29513 lea 4(%esi), %esi
29514 lea 4(%edi), %edi
29515 dec %edx
29516@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29517 jb 5f
29518 SRC( movw (%esi), %cx )
29519 leal 2(%esi), %esi
29520-DST( movw %cx, (%edi) )
29521+DST( movw %cx, %es:(%edi) )
29522 leal 2(%edi), %edi
29523 je 6f
29524 shll $16,%ecx
29525 SRC(5: movb (%esi), %cl )
29526-DST( movb %cl, (%edi) )
29527+DST( movb %cl, %es:(%edi) )
29528 6: addl %ecx, %eax
29529 adcl $0, %eax
29530 7:
29531@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29532
29533 6001:
29534 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29535- movl $-EFAULT, (%ebx)
29536+ movl $-EFAULT, %ss:(%ebx)
29537
29538 # zero the complete destination - computing the rest
29539 # is too much work
29540@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29541
29542 6002:
29543 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29544- movl $-EFAULT,(%ebx)
29545+ movl $-EFAULT,%ss:(%ebx)
29546 jmp 5000b
29547
29548 .previous
29549
29550+ pushl_cfi %ss
29551+ popl_cfi %ds
29552+ pushl_cfi %ss
29553+ popl_cfi %es
29554 popl_cfi %ebx
29555 CFI_RESTORE ebx
29556 popl_cfi %esi
29557@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29558 popl_cfi %ecx # equivalent to addl $4,%esp
29559 ret
29560 CFI_ENDPROC
29561-ENDPROC(csum_partial_copy_generic)
29562+ENDPROC(csum_partial_copy_generic_to_user)
29563
29564 #else
29565
29566 /* Version for PentiumII/PPro */
29567
29568 #define ROUND1(x) \
29569+ nop; nop; nop; \
29570 SRC(movl x(%esi), %ebx ) ; \
29571 addl %ebx, %eax ; \
29572- DST(movl %ebx, x(%edi) ) ;
29573+ DST(movl %ebx, %es:x(%edi)) ;
29574
29575 #define ROUND(x) \
29576+ nop; nop; nop; \
29577 SRC(movl x(%esi), %ebx ) ; \
29578 adcl %ebx, %eax ; \
29579- DST(movl %ebx, x(%edi) ) ;
29580+ DST(movl %ebx, %es:x(%edi)) ;
29581
29582 #define ARGBASE 12
29583-
29584-ENTRY(csum_partial_copy_generic)
29585+
29586+ENTRY(csum_partial_copy_generic_to_user)
29587 CFI_STARTPROC
29588+
29589+#ifdef CONFIG_PAX_MEMORY_UDEREF
29590+ pushl_cfi %gs
29591+ popl_cfi %es
29592+ jmp csum_partial_copy_generic
29593+#endif
29594+
29595+ENTRY(csum_partial_copy_generic_from_user)
29596+
29597+#ifdef CONFIG_PAX_MEMORY_UDEREF
29598+ pushl_cfi %gs
29599+ popl_cfi %ds
29600+#endif
29601+
29602+ENTRY(csum_partial_copy_generic)
29603 pushl_cfi %ebx
29604 CFI_REL_OFFSET ebx, 0
29605 pushl_cfi %edi
29606@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29607 subl %ebx, %edi
29608 lea -1(%esi),%edx
29609 andl $-32,%edx
29610- lea 3f(%ebx,%ebx), %ebx
29611+ lea 3f(%ebx,%ebx,2), %ebx
29612 testl %esi, %esi
29613 jmp *%ebx
29614 1: addl $64,%esi
29615@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29616 jb 5f
29617 SRC( movw (%esi), %dx )
29618 leal 2(%esi), %esi
29619-DST( movw %dx, (%edi) )
29620+DST( movw %dx, %es:(%edi) )
29621 leal 2(%edi), %edi
29622 je 6f
29623 shll $16,%edx
29624 5:
29625 SRC( movb (%esi), %dl )
29626-DST( movb %dl, (%edi) )
29627+DST( movb %dl, %es:(%edi) )
29628 6: addl %edx, %eax
29629 adcl $0, %eax
29630 7:
29631 .section .fixup, "ax"
29632 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29633- movl $-EFAULT, (%ebx)
29634+ movl $-EFAULT, %ss:(%ebx)
29635 # zero the complete destination (computing the rest is too much work)
29636 movl ARGBASE+8(%esp),%edi # dst
29637 movl ARGBASE+12(%esp),%ecx # len
29638@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29639 rep; stosb
29640 jmp 7b
29641 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29642- movl $-EFAULT, (%ebx)
29643+ movl $-EFAULT, %ss:(%ebx)
29644 jmp 7b
29645 .previous
29646
29647+#ifdef CONFIG_PAX_MEMORY_UDEREF
29648+ pushl_cfi %ss
29649+ popl_cfi %ds
29650+ pushl_cfi %ss
29651+ popl_cfi %es
29652+#endif
29653+
29654 popl_cfi %esi
29655 CFI_RESTORE esi
29656 popl_cfi %edi
29657@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29658 CFI_RESTORE ebx
29659 ret
29660 CFI_ENDPROC
29661-ENDPROC(csum_partial_copy_generic)
29662+ENDPROC(csum_partial_copy_generic_to_user)
29663
29664 #undef ROUND
29665 #undef ROUND1
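
The i386 checksum-copy changes are the UDEREF companion to the copy_user changes: new csum_partial_copy_generic_to_user/_from_user entry points load the userland segment from %gs into %es or %ds, every DST() userland store gains an explicit %es: override, and the fixup and exit paths reload %ds/%es from %ss before returning. The nop padding in ROUND/ROUND1 together with the lea 3f(%ebx,%ebx,2) rescaling keeps the computed jump into the unrolled loop aligned with the now-longer rounds (each DST gained a one-byte segment prefix).
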
29666diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29667index f2145cf..cea889d 100644
29668--- a/arch/x86/lib/clear_page_64.S
29669+++ b/arch/x86/lib/clear_page_64.S
29670@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29671 movl $4096/8,%ecx
29672 xorl %eax,%eax
29673 rep stosq
29674+ pax_force_retaddr
29675 ret
29676 CFI_ENDPROC
29677 ENDPROC(clear_page_c)
29678@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29679 movl $4096,%ecx
29680 xorl %eax,%eax
29681 rep stosb
29682+ pax_force_retaddr
29683 ret
29684 CFI_ENDPROC
29685 ENDPROC(clear_page_c_e)
29686@@ -43,6 +45,7 @@ ENTRY(clear_page)
29687 leaq 64(%rdi),%rdi
29688 jnz .Lloop
29689 nop
29690+ pax_force_retaddr
29691 ret
29692 CFI_ENDPROC
29693 .Lclear_page_end:
29694@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29695
29696 #include <asm/cpufeature.h>
29697
29698- .section .altinstr_replacement,"ax"
29699+ .section .altinstr_replacement,"a"
29700 1: .byte 0xeb /* jmp <disp8> */
29701 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29702 2: .byte 0xeb /* jmp <disp8> */
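
Two tweaks start here and recur through the copy_page/memcpy/memmove/memset hunks below: .altinstr_replacement drops its executable flag ("ax" becomes "a"), since replacement instructions are only ever copied over the original code by the alternatives patcher and never executed in place, so under KERNEXEC there is no reason to map that section executable; and every ret is preceded by pax_force_retaddr, which forces the saved return address back into kernel space (the exact method depends on the configured KERNEXEC plugin mode, and it expands to nothing when the feature is disabled).
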
29703diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29704index 1e572c5..2a162cd 100644
29705--- a/arch/x86/lib/cmpxchg16b_emu.S
29706+++ b/arch/x86/lib/cmpxchg16b_emu.S
29707@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29708
29709 popf
29710 mov $1, %al
29711+ pax_force_retaddr
29712 ret
29713
29714 not_same:
29715 popf
29716 xor %al,%al
29717+ pax_force_retaddr
29718 ret
29719
29720 CFI_ENDPROC
29721diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29722index 176cca6..e0d658e 100644
29723--- a/arch/x86/lib/copy_page_64.S
29724+++ b/arch/x86/lib/copy_page_64.S
29725@@ -9,6 +9,7 @@ copy_page_rep:
29726 CFI_STARTPROC
29727 movl $4096/8, %ecx
29728 rep movsq
29729+ pax_force_retaddr
29730 ret
29731 CFI_ENDPROC
29732 ENDPROC(copy_page_rep)
29733@@ -24,8 +25,8 @@ ENTRY(copy_page)
29734 CFI_ADJUST_CFA_OFFSET 2*8
29735 movq %rbx, (%rsp)
29736 CFI_REL_OFFSET rbx, 0
29737- movq %r12, 1*8(%rsp)
29738- CFI_REL_OFFSET r12, 1*8
29739+ movq %r13, 1*8(%rsp)
29740+ CFI_REL_OFFSET r13, 1*8
29741
29742 movl $(4096/64)-5, %ecx
29743 .p2align 4
29744@@ -38,7 +39,7 @@ ENTRY(copy_page)
29745 movq 0x8*4(%rsi), %r9
29746 movq 0x8*5(%rsi), %r10
29747 movq 0x8*6(%rsi), %r11
29748- movq 0x8*7(%rsi), %r12
29749+ movq 0x8*7(%rsi), %r13
29750
29751 prefetcht0 5*64(%rsi)
29752
29753@@ -49,7 +50,7 @@ ENTRY(copy_page)
29754 movq %r9, 0x8*4(%rdi)
29755 movq %r10, 0x8*5(%rdi)
29756 movq %r11, 0x8*6(%rdi)
29757- movq %r12, 0x8*7(%rdi)
29758+ movq %r13, 0x8*7(%rdi)
29759
29760 leaq 64 (%rsi), %rsi
29761 leaq 64 (%rdi), %rdi
29762@@ -68,7 +69,7 @@ ENTRY(copy_page)
29763 movq 0x8*4(%rsi), %r9
29764 movq 0x8*5(%rsi), %r10
29765 movq 0x8*6(%rsi), %r11
29766- movq 0x8*7(%rsi), %r12
29767+ movq 0x8*7(%rsi), %r13
29768
29769 movq %rax, 0x8*0(%rdi)
29770 movq %rbx, 0x8*1(%rdi)
29771@@ -77,7 +78,7 @@ ENTRY(copy_page)
29772 movq %r9, 0x8*4(%rdi)
29773 movq %r10, 0x8*5(%rdi)
29774 movq %r11, 0x8*6(%rdi)
29775- movq %r12, 0x8*7(%rdi)
29776+ movq %r13, 0x8*7(%rdi)
29777
29778 leaq 64(%rdi), %rdi
29779 leaq 64(%rsi), %rsi
29780@@ -85,10 +86,11 @@ ENTRY(copy_page)
29781
29782 movq (%rsp), %rbx
29783 CFI_RESTORE rbx
29784- movq 1*8(%rsp), %r12
29785- CFI_RESTORE r12
29786+ movq 1*8(%rsp), %r13
29787+ CFI_RESTORE r13
29788 addq $2*8, %rsp
29789 CFI_ADJUST_CFA_OFFSET -2*8
29790+ pax_force_retaddr
29791 ret
29792 .Lcopy_page_end:
29793 CFI_ENDPROC
29794@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29795
29796 #include <asm/cpufeature.h>
29797
29798- .section .altinstr_replacement,"ax"
29799+ .section .altinstr_replacement,"a"
29800 1: .byte 0xeb /* jmp <disp8> */
29801 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29802 2:
29803diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29804index dee945d..a84067b 100644
29805--- a/arch/x86/lib/copy_user_64.S
29806+++ b/arch/x86/lib/copy_user_64.S
29807@@ -18,31 +18,7 @@
29808 #include <asm/alternative-asm.h>
29809 #include <asm/asm.h>
29810 #include <asm/smap.h>
29811-
29812-/*
29813- * By placing feature2 after feature1 in altinstructions section, we logically
29814- * implement:
29815- * If CPU has feature2, jmp to alt2 is used
29816- * else if CPU has feature1, jmp to alt1 is used
29817- * else jmp to orig is used.
29818- */
29819- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29820-0:
29821- .byte 0xe9 /* 32bit jump */
29822- .long \orig-1f /* by default jump to orig */
29823-1:
29824- .section .altinstr_replacement,"ax"
29825-2: .byte 0xe9 /* near jump with 32bit immediate */
29826- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29827-3: .byte 0xe9 /* near jump with 32bit immediate */
29828- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29829- .previous
29830-
29831- .section .altinstructions,"a"
29832- altinstruction_entry 0b,2b,\feature1,5,5
29833- altinstruction_entry 0b,3b,\feature2,5,5
29834- .previous
29835- .endm
29836+#include <asm/pgtable.h>
29837
29838 .macro ALIGN_DESTINATION
29839 #ifdef FIX_ALIGNMENT
29840@@ -70,52 +46,6 @@
29841 #endif
29842 .endm
29843
29844-/* Standard copy_to_user with segment limit checking */
29845-ENTRY(_copy_to_user)
29846- CFI_STARTPROC
29847- GET_THREAD_INFO(%rax)
29848- movq %rdi,%rcx
29849- addq %rdx,%rcx
29850- jc bad_to_user
29851- cmpq TI_addr_limit(%rax),%rcx
29852- ja bad_to_user
29853- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29854- copy_user_generic_unrolled,copy_user_generic_string, \
29855- copy_user_enhanced_fast_string
29856- CFI_ENDPROC
29857-ENDPROC(_copy_to_user)
29858-
29859-/* Standard copy_from_user with segment limit checking */
29860-ENTRY(_copy_from_user)
29861- CFI_STARTPROC
29862- GET_THREAD_INFO(%rax)
29863- movq %rsi,%rcx
29864- addq %rdx,%rcx
29865- jc bad_from_user
29866- cmpq TI_addr_limit(%rax),%rcx
29867- ja bad_from_user
29868- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29869- copy_user_generic_unrolled,copy_user_generic_string, \
29870- copy_user_enhanced_fast_string
29871- CFI_ENDPROC
29872-ENDPROC(_copy_from_user)
29873-
29874- .section .fixup,"ax"
29875- /* must zero dest */
29876-ENTRY(bad_from_user)
29877-bad_from_user:
29878- CFI_STARTPROC
29879- movl %edx,%ecx
29880- xorl %eax,%eax
29881- rep
29882- stosb
29883-bad_to_user:
29884- movl %edx,%eax
29885- ret
29886- CFI_ENDPROC
29887-ENDPROC(bad_from_user)
29888- .previous
29889-
29890 /*
29891 * copy_user_generic_unrolled - memory copy with exception handling.
29892 * This version is for CPUs like P4 that don't have efficient micro
29893@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29894 */
29895 ENTRY(copy_user_generic_unrolled)
29896 CFI_STARTPROC
29897+ ASM_PAX_OPEN_USERLAND
29898 ASM_STAC
29899 cmpl $8,%edx
29900 jb 20f /* less then 8 bytes, go to byte copy loop */
29901@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29902 jnz 21b
29903 23: xor %eax,%eax
29904 ASM_CLAC
29905+ ASM_PAX_CLOSE_USERLAND
29906+ pax_force_retaddr
29907 ret
29908
29909 .section .fixup,"ax"
29910@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29911 */
29912 ENTRY(copy_user_generic_string)
29913 CFI_STARTPROC
29914+ ASM_PAX_OPEN_USERLAND
29915 ASM_STAC
29916 cmpl $8,%edx
29917 jb 2f /* less than 8 bytes, go to byte copy loop */
29918@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29919 movsb
29920 xorl %eax,%eax
29921 ASM_CLAC
29922+ ASM_PAX_CLOSE_USERLAND
29923+ pax_force_retaddr
29924 ret
29925
29926 .section .fixup,"ax"
29927@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29928 */
29929 ENTRY(copy_user_enhanced_fast_string)
29930 CFI_STARTPROC
29931+ ASM_PAX_OPEN_USERLAND
29932 ASM_STAC
29933 movl %edx,%ecx
29934 1: rep
29935 movsb
29936 xorl %eax,%eax
29937 ASM_CLAC
29938+ ASM_PAX_CLOSE_USERLAND
29939+ pax_force_retaddr
29940 ret
29941
29942 .section .fixup,"ax"
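
copy_user_64.S loses the _copy_to_user/_copy_from_user entry points (and the ALTERNATIVE_JUMP helper they used) because the patch re-implements those with access checking elsewhere; the remaining copy_user_generic_* workers get ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND wrapped around the existing ASM_STAC/ASM_CLAC pair. The C-level counterpart of that bracketing is visible verbatim in the csum-wrappers_64.c hunk below:

	pax_open_userland();	/* UDEREF: open a window onto userland */
	stac();			/* SMAP: permit supervisor access to user pages */
	ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
					len, isum, NULL, errp);
	clac();
	pax_close_userland();
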
29943diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29944index 6a4f43c..c70fb52 100644
29945--- a/arch/x86/lib/copy_user_nocache_64.S
29946+++ b/arch/x86/lib/copy_user_nocache_64.S
29947@@ -8,6 +8,7 @@
29948
29949 #include <linux/linkage.h>
29950 #include <asm/dwarf2.h>
29951+#include <asm/alternative-asm.h>
29952
29953 #define FIX_ALIGNMENT 1
29954
29955@@ -16,6 +17,7 @@
29956 #include <asm/thread_info.h>
29957 #include <asm/asm.h>
29958 #include <asm/smap.h>
29959+#include <asm/pgtable.h>
29960
29961 .macro ALIGN_DESTINATION
29962 #ifdef FIX_ALIGNMENT
29963@@ -49,6 +51,16 @@
29964 */
29965 ENTRY(__copy_user_nocache)
29966 CFI_STARTPROC
29967+
29968+#ifdef CONFIG_PAX_MEMORY_UDEREF
29969+ mov pax_user_shadow_base,%rcx
29970+ cmp %rcx,%rsi
29971+ jae 1f
29972+ add %rcx,%rsi
29973+1:
29974+#endif
29975+
29976+ ASM_PAX_OPEN_USERLAND
29977 ASM_STAC
29978 cmpl $8,%edx
29979 	jb 20f		/* less than 8 bytes, go to byte copy loop */
29980@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
29981 jnz 21b
29982 23: xorl %eax,%eax
29983 ASM_CLAC
29984+ ASM_PAX_CLOSE_USERLAND
29985 sfence
29986+ pax_force_retaddr
29987 ret
29988
29989 .section .fixup,"ax"
29990diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
29991index 2419d5f..fe52d0e 100644
29992--- a/arch/x86/lib/csum-copy_64.S
29993+++ b/arch/x86/lib/csum-copy_64.S
29994@@ -9,6 +9,7 @@
29995 #include <asm/dwarf2.h>
29996 #include <asm/errno.h>
29997 #include <asm/asm.h>
29998+#include <asm/alternative-asm.h>
29999
30000 /*
30001 * Checksum copy with exception handling.
30002@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30003 CFI_ADJUST_CFA_OFFSET 7*8
30004 movq %rbx, 2*8(%rsp)
30005 CFI_REL_OFFSET rbx, 2*8
30006- movq %r12, 3*8(%rsp)
30007- CFI_REL_OFFSET r12, 3*8
30008+ movq %r15, 3*8(%rsp)
30009+ CFI_REL_OFFSET r15, 3*8
30010 movq %r14, 4*8(%rsp)
30011 CFI_REL_OFFSET r14, 4*8
30012 movq %r13, 5*8(%rsp)
30013@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30014 movl %edx, %ecx
30015
30016 xorl %r9d, %r9d
30017- movq %rcx, %r12
30018+ movq %rcx, %r15
30019
30020- shrq $6, %r12
30021+ shrq $6, %r15
30022 jz .Lhandle_tail /* < 64 */
30023
30024 clc
30025
30026 /* main loop. clear in 64 byte blocks */
30027 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30028- /* r11: temp3, rdx: temp4, r12 loopcnt */
30029+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30030 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30031 .p2align 4
30032 .Lloop:
30033@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30034 adcq %r14, %rax
30035 adcq %r13, %rax
30036
30037- decl %r12d
30038+ decl %r15d
30039
30040 dest
30041 movq %rbx, (%rsi)
30042@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30043 .Lende:
30044 movq 2*8(%rsp), %rbx
30045 CFI_RESTORE rbx
30046- movq 3*8(%rsp), %r12
30047- CFI_RESTORE r12
30048+ movq 3*8(%rsp), %r15
30049+ CFI_RESTORE r15
30050 movq 4*8(%rsp), %r14
30051 CFI_RESTORE r14
30052 movq 5*8(%rsp), %r13
30053@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30054 CFI_RESTORE rbp
30055 addq $7*8, %rsp
30056 CFI_ADJUST_CFA_OFFSET -7*8
30057+ pax_force_retaddr
30058 ret
30059 CFI_RESTORE_STATE
30060
30061diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30062index 7609e0e..b449b98 100644
30063--- a/arch/x86/lib/csum-wrappers_64.c
30064+++ b/arch/x86/lib/csum-wrappers_64.c
30065@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30066 len -= 2;
30067 }
30068 }
30069+ pax_open_userland();
30070 stac();
30071- isum = csum_partial_copy_generic((__force const void *)src,
30072+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30073 dst, len, isum, errp, NULL);
30074 clac();
30075+ pax_close_userland();
30076 if (unlikely(*errp))
30077 goto out_err;
30078
30079@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30080 }
30081
30082 *errp = 0;
30083+ pax_open_userland();
30084 stac();
30085- ret = csum_partial_copy_generic(src, (void __force *)dst,
30086+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30087 len, isum, NULL, errp);
30088 clac();
30089+ pax_close_userland();
30090 return ret;
30091 }
30092 EXPORT_SYMBOL(csum_partial_copy_to_user);
30093diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30094index a451235..1daa956 100644
30095--- a/arch/x86/lib/getuser.S
30096+++ b/arch/x86/lib/getuser.S
30097@@ -33,17 +33,40 @@
30098 #include <asm/thread_info.h>
30099 #include <asm/asm.h>
30100 #include <asm/smap.h>
30101+#include <asm/segment.h>
30102+#include <asm/pgtable.h>
30103+#include <asm/alternative-asm.h>
30104+
30105+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30106+#define __copyuser_seg gs;
30107+#else
30108+#define __copyuser_seg
30109+#endif
30110
30111 .text
30112 ENTRY(__get_user_1)
30113 CFI_STARTPROC
30114+
30115+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30116 GET_THREAD_INFO(%_ASM_DX)
30117 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30118 jae bad_get_user
30119 ASM_STAC
30120-1: movzbl (%_ASM_AX),%edx
30121+
30122+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30123+ mov pax_user_shadow_base,%_ASM_DX
30124+ cmp %_ASM_DX,%_ASM_AX
30125+ jae 1234f
30126+ add %_ASM_DX,%_ASM_AX
30127+1234:
30128+#endif
30129+
30130+#endif
30131+
30132+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30133 xor %eax,%eax
30134 ASM_CLAC
30135+ pax_force_retaddr
30136 ret
30137 CFI_ENDPROC
30138 ENDPROC(__get_user_1)
30139@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30140 ENTRY(__get_user_2)
30141 CFI_STARTPROC
30142 add $1,%_ASM_AX
30143+
30144+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30145 jc bad_get_user
30146 GET_THREAD_INFO(%_ASM_DX)
30147 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30148 jae bad_get_user
30149 ASM_STAC
30150-2: movzwl -1(%_ASM_AX),%edx
30151+
30152+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30153+ mov pax_user_shadow_base,%_ASM_DX
30154+ cmp %_ASM_DX,%_ASM_AX
30155+ jae 1234f
30156+ add %_ASM_DX,%_ASM_AX
30157+1234:
30158+#endif
30159+
30160+#endif
30161+
30162+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30163 xor %eax,%eax
30164 ASM_CLAC
30165+ pax_force_retaddr
30166 ret
30167 CFI_ENDPROC
30168 ENDPROC(__get_user_2)
30169@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30170 ENTRY(__get_user_4)
30171 CFI_STARTPROC
30172 add $3,%_ASM_AX
30173+
30174+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30175 jc bad_get_user
30176 GET_THREAD_INFO(%_ASM_DX)
30177 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30178 jae bad_get_user
30179 ASM_STAC
30180-3: movl -3(%_ASM_AX),%edx
30181+
30182+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30183+ mov pax_user_shadow_base,%_ASM_DX
30184+ cmp %_ASM_DX,%_ASM_AX
30185+ jae 1234f
30186+ add %_ASM_DX,%_ASM_AX
30187+1234:
30188+#endif
30189+
30190+#endif
30191+
30192+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30193 xor %eax,%eax
30194 ASM_CLAC
30195+ pax_force_retaddr
30196 ret
30197 CFI_ENDPROC
30198 ENDPROC(__get_user_4)
30199@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30200 GET_THREAD_INFO(%_ASM_DX)
30201 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30202 jae bad_get_user
30203+
30204+#ifdef CONFIG_PAX_MEMORY_UDEREF
30205+ mov pax_user_shadow_base,%_ASM_DX
30206+ cmp %_ASM_DX,%_ASM_AX
30207+ jae 1234f
30208+ add %_ASM_DX,%_ASM_AX
30209+1234:
30210+#endif
30211+
30212 ASM_STAC
30213 4: movq -7(%_ASM_AX),%rdx
30214 xor %eax,%eax
30215 ASM_CLAC
30216+ pax_force_retaddr
30217 ret
30218 #else
30219 add $7,%_ASM_AX
30220@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30221 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30222 jae bad_get_user_8
30223 ASM_STAC
30224-4: movl -7(%_ASM_AX),%edx
30225-5: movl -3(%_ASM_AX),%ecx
30226+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30227+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30228 xor %eax,%eax
30229 ASM_CLAC
30230+ pax_force_retaddr
30231 ret
30232 #endif
30233 CFI_ENDPROC
30234@@ -113,6 +175,7 @@ bad_get_user:
30235 xor %edx,%edx
30236 mov $(-EFAULT),%_ASM_AX
30237 ASM_CLAC
30238+ pax_force_retaddr
30239 ret
30240 CFI_ENDPROC
30241 END(bad_get_user)
30242@@ -124,6 +187,7 @@ bad_get_user_8:
30243 xor %ecx,%ecx
30244 mov $(-EFAULT),%_ASM_AX
30245 ASM_CLAC
30246+ pax_force_retaddr
30247 ret
30248 CFI_ENDPROC
30249 END(bad_get_user_8)
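
Every __get_user_N variant above gains the same two UDEREF ingredients: on i386 the access goes through the __copyuser_seg (%gs) segment override instead of a flat load, and on amd64 the address is rebased into the shadow mapping of userland when it lies below pax_user_shadow_base. A C model of the amd64 rebasing (a simplified assumption matching the cmp/jae/add sequence above, not any real helper):

	static inline unsigned long uderef_rebase(unsigned long uaddr)
	{
		if (uaddr < pax_user_shadow_base)	/* still a plain userland address? */
			uaddr += pax_user_shadow_base;	/* shift it into the shadow view */
		return uaddr;
	}
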
30250diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30251index 54fcffe..7be149e 100644
30252--- a/arch/x86/lib/insn.c
30253+++ b/arch/x86/lib/insn.c
30254@@ -20,8 +20,10 @@
30255
30256 #ifdef __KERNEL__
30257 #include <linux/string.h>
30258+#include <asm/pgtable_types.h>
30259 #else
30260 #include <string.h>
30261+#define ktla_ktva(addr) addr
30262 #endif
30263 #include <asm/inat.h>
30264 #include <asm/insn.h>
30265@@ -53,8 +55,8 @@
30266 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30267 {
30268 memset(insn, 0, sizeof(*insn));
30269- insn->kaddr = kaddr;
30270- insn->next_byte = kaddr;
30271+ insn->kaddr = ktla_ktva(kaddr);
30272+ insn->next_byte = ktla_ktva(kaddr);
30273 insn->x86_64 = x86_64 ? 1 : 0;
30274 insn->opnd_bytes = 4;
30275 if (x86_64)
30276diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30277index 05a95e7..326f2fa 100644
30278--- a/arch/x86/lib/iomap_copy_64.S
30279+++ b/arch/x86/lib/iomap_copy_64.S
30280@@ -17,6 +17,7 @@
30281
30282 #include <linux/linkage.h>
30283 #include <asm/dwarf2.h>
30284+#include <asm/alternative-asm.h>
30285
30286 /*
30287 * override generic version in lib/iomap_copy.c
30288@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30289 CFI_STARTPROC
30290 movl %edx,%ecx
30291 rep movsd
30292+ pax_force_retaddr
30293 ret
30294 CFI_ENDPROC
30295 ENDPROC(__iowrite32_copy)
30296diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30297index 56313a3..0db417e 100644
30298--- a/arch/x86/lib/memcpy_64.S
30299+++ b/arch/x86/lib/memcpy_64.S
30300@@ -24,7 +24,7 @@
30301 * This gets patched over the unrolled variant (below) via the
30302 * alternative instructions framework:
30303 */
30304- .section .altinstr_replacement, "ax", @progbits
30305+ .section .altinstr_replacement, "a", @progbits
30306 .Lmemcpy_c:
30307 movq %rdi, %rax
30308 movq %rdx, %rcx
30309@@ -33,6 +33,7 @@
30310 rep movsq
30311 movl %edx, %ecx
30312 rep movsb
30313+ pax_force_retaddr
30314 ret
30315 .Lmemcpy_e:
30316 .previous
30317@@ -44,11 +45,12 @@
30318 * This gets patched over the unrolled variant (below) via the
30319 * alternative instructions framework:
30320 */
30321- .section .altinstr_replacement, "ax", @progbits
30322+ .section .altinstr_replacement, "a", @progbits
30323 .Lmemcpy_c_e:
30324 movq %rdi, %rax
30325 movq %rdx, %rcx
30326 rep movsb
30327+ pax_force_retaddr
30328 ret
30329 .Lmemcpy_e_e:
30330 .previous
30331@@ -136,6 +138,7 @@ ENTRY(memcpy)
30332 movq %r9, 1*8(%rdi)
30333 movq %r10, -2*8(%rdi, %rdx)
30334 movq %r11, -1*8(%rdi, %rdx)
30335+ pax_force_retaddr
30336 retq
30337 .p2align 4
30338 .Lless_16bytes:
30339@@ -148,6 +151,7 @@ ENTRY(memcpy)
30340 movq -1*8(%rsi, %rdx), %r9
30341 movq %r8, 0*8(%rdi)
30342 movq %r9, -1*8(%rdi, %rdx)
30343+ pax_force_retaddr
30344 retq
30345 .p2align 4
30346 .Lless_8bytes:
30347@@ -161,6 +165,7 @@ ENTRY(memcpy)
30348 movl -4(%rsi, %rdx), %r8d
30349 movl %ecx, (%rdi)
30350 movl %r8d, -4(%rdi, %rdx)
30351+ pax_force_retaddr
30352 retq
30353 .p2align 4
30354 .Lless_3bytes:
30355@@ -179,6 +184,7 @@ ENTRY(memcpy)
30356 movb %cl, (%rdi)
30357
30358 .Lend:
30359+ pax_force_retaddr
30360 retq
30361 CFI_ENDPROC
30362 ENDPROC(memcpy)
30363diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30364index 65268a6..dd1de11 100644
30365--- a/arch/x86/lib/memmove_64.S
30366+++ b/arch/x86/lib/memmove_64.S
30367@@ -202,14 +202,16 @@ ENTRY(memmove)
30368 movb (%rsi), %r11b
30369 movb %r11b, (%rdi)
30370 13:
30371+ pax_force_retaddr
30372 retq
30373 CFI_ENDPROC
30374
30375- .section .altinstr_replacement,"ax"
30376+ .section .altinstr_replacement,"a"
30377 .Lmemmove_begin_forward_efs:
30378 /* Forward moving data. */
30379 movq %rdx, %rcx
30380 rep movsb
30381+ pax_force_retaddr
30382 retq
30383 .Lmemmove_end_forward_efs:
30384 .previous
30385diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30386index 2dcb380..2eb79fe 100644
30387--- a/arch/x86/lib/memset_64.S
30388+++ b/arch/x86/lib/memset_64.S
30389@@ -16,7 +16,7 @@
30390 *
30391 * rax original destination
30392 */
30393- .section .altinstr_replacement, "ax", @progbits
30394+ .section .altinstr_replacement, "a", @progbits
30395 .Lmemset_c:
30396 movq %rdi,%r9
30397 movq %rdx,%rcx
30398@@ -30,6 +30,7 @@
30399 movl %edx,%ecx
30400 rep stosb
30401 movq %r9,%rax
30402+ pax_force_retaddr
30403 ret
30404 .Lmemset_e:
30405 .previous
30406@@ -45,13 +46,14 @@
30407 *
30408 * rax original destination
30409 */
30410- .section .altinstr_replacement, "ax", @progbits
30411+ .section .altinstr_replacement, "a", @progbits
30412 .Lmemset_c_e:
30413 movq %rdi,%r9
30414 movb %sil,%al
30415 movq %rdx,%rcx
30416 rep stosb
30417 movq %r9,%rax
30418+ pax_force_retaddr
30419 ret
30420 .Lmemset_e_e:
30421 .previous
30422@@ -118,6 +120,7 @@ ENTRY(__memset)
30423
30424 .Lende:
30425 movq %r10,%rax
30426+ pax_force_retaddr
30427 ret
30428
30429 CFI_RESTORE_STATE
30430diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30431index c9f2d9b..e7fd2c0 100644
30432--- a/arch/x86/lib/mmx_32.c
30433+++ b/arch/x86/lib/mmx_32.c
30434@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30435 {
30436 void *p;
30437 int i;
30438+ unsigned long cr0;
30439
30440 if (unlikely(in_interrupt()))
30441 return __memcpy(to, from, len);
30442@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30443 kernel_fpu_begin();
30444
30445 __asm__ __volatile__ (
30446- "1: prefetch (%0)\n" /* This set is 28 bytes */
30447- " prefetch 64(%0)\n"
30448- " prefetch 128(%0)\n"
30449- " prefetch 192(%0)\n"
30450- " prefetch 256(%0)\n"
30451+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30452+ " prefetch 64(%1)\n"
30453+ " prefetch 128(%1)\n"
30454+ " prefetch 192(%1)\n"
30455+ " prefetch 256(%1)\n"
30456 "2: \n"
30457 ".section .fixup, \"ax\"\n"
30458- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30459+ "3: \n"
30460+
30461+#ifdef CONFIG_PAX_KERNEXEC
30462+ " movl %%cr0, %0\n"
30463+ " movl %0, %%eax\n"
30464+ " andl $0xFFFEFFFF, %%eax\n"
30465+ " movl %%eax, %%cr0\n"
30466+#endif
30467+
30468+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30469+
30470+#ifdef CONFIG_PAX_KERNEXEC
30471+ " movl %0, %%cr0\n"
30472+#endif
30473+
30474 " jmp 2b\n"
30475 ".previous\n"
30476 _ASM_EXTABLE(1b, 3b)
30477- : : "r" (from));
30478+ : "=&r" (cr0) : "r" (from) : "ax");
30479
30480 for ( ; i > 5; i--) {
30481 __asm__ __volatile__ (
30482- "1: prefetch 320(%0)\n"
30483- "2: movq (%0), %%mm0\n"
30484- " movq 8(%0), %%mm1\n"
30485- " movq 16(%0), %%mm2\n"
30486- " movq 24(%0), %%mm3\n"
30487- " movq %%mm0, (%1)\n"
30488- " movq %%mm1, 8(%1)\n"
30489- " movq %%mm2, 16(%1)\n"
30490- " movq %%mm3, 24(%1)\n"
30491- " movq 32(%0), %%mm0\n"
30492- " movq 40(%0), %%mm1\n"
30493- " movq 48(%0), %%mm2\n"
30494- " movq 56(%0), %%mm3\n"
30495- " movq %%mm0, 32(%1)\n"
30496- " movq %%mm1, 40(%1)\n"
30497- " movq %%mm2, 48(%1)\n"
30498- " movq %%mm3, 56(%1)\n"
30499+ "1: prefetch 320(%1)\n"
30500+ "2: movq (%1), %%mm0\n"
30501+ " movq 8(%1), %%mm1\n"
30502+ " movq 16(%1), %%mm2\n"
30503+ " movq 24(%1), %%mm3\n"
30504+ " movq %%mm0, (%2)\n"
30505+ " movq %%mm1, 8(%2)\n"
30506+ " movq %%mm2, 16(%2)\n"
30507+ " movq %%mm3, 24(%2)\n"
30508+ " movq 32(%1), %%mm0\n"
30509+ " movq 40(%1), %%mm1\n"
30510+ " movq 48(%1), %%mm2\n"
30511+ " movq 56(%1), %%mm3\n"
30512+ " movq %%mm0, 32(%2)\n"
30513+ " movq %%mm1, 40(%2)\n"
30514+ " movq %%mm2, 48(%2)\n"
30515+ " movq %%mm3, 56(%2)\n"
30516 ".section .fixup, \"ax\"\n"
30517- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30518+ "3:\n"
30519+
30520+#ifdef CONFIG_PAX_KERNEXEC
30521+ " movl %%cr0, %0\n"
30522+ " movl %0, %%eax\n"
30523+ " andl $0xFFFEFFFF, %%eax\n"
30524+ " movl %%eax, %%cr0\n"
30525+#endif
30526+
30527+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30528+
30529+#ifdef CONFIG_PAX_KERNEXEC
30530+ " movl %0, %%cr0\n"
30531+#endif
30532+
30533 " jmp 2b\n"
30534 ".previous\n"
30535 _ASM_EXTABLE(1b, 3b)
30536- : : "r" (from), "r" (to) : "memory");
30537+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30538
30539 from += 64;
30540 to += 64;
30541@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30542 static void fast_copy_page(void *to, void *from)
30543 {
30544 int i;
30545+ unsigned long cr0;
30546
30547 kernel_fpu_begin();
30548
30549@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30550 * but that is for later. -AV
30551 */
30552 __asm__ __volatile__(
30553- "1: prefetch (%0)\n"
30554- " prefetch 64(%0)\n"
30555- " prefetch 128(%0)\n"
30556- " prefetch 192(%0)\n"
30557- " prefetch 256(%0)\n"
30558+ "1: prefetch (%1)\n"
30559+ " prefetch 64(%1)\n"
30560+ " prefetch 128(%1)\n"
30561+ " prefetch 192(%1)\n"
30562+ " prefetch 256(%1)\n"
30563 "2: \n"
30564 ".section .fixup, \"ax\"\n"
30565- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30566+ "3: \n"
30567+
30568+#ifdef CONFIG_PAX_KERNEXEC
30569+ " movl %%cr0, %0\n"
30570+ " movl %0, %%eax\n"
30571+ " andl $0xFFFEFFFF, %%eax\n"
30572+ " movl %%eax, %%cr0\n"
30573+#endif
30574+
30575+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30576+
30577+#ifdef CONFIG_PAX_KERNEXEC
30578+ " movl %0, %%cr0\n"
30579+#endif
30580+
30581 " jmp 2b\n"
30582 ".previous\n"
30583- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30584+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30585
30586 for (i = 0; i < (4096-320)/64; i++) {
30587 __asm__ __volatile__ (
30588- "1: prefetch 320(%0)\n"
30589- "2: movq (%0), %%mm0\n"
30590- " movntq %%mm0, (%1)\n"
30591- " movq 8(%0), %%mm1\n"
30592- " movntq %%mm1, 8(%1)\n"
30593- " movq 16(%0), %%mm2\n"
30594- " movntq %%mm2, 16(%1)\n"
30595- " movq 24(%0), %%mm3\n"
30596- " movntq %%mm3, 24(%1)\n"
30597- " movq 32(%0), %%mm4\n"
30598- " movntq %%mm4, 32(%1)\n"
30599- " movq 40(%0), %%mm5\n"
30600- " movntq %%mm5, 40(%1)\n"
30601- " movq 48(%0), %%mm6\n"
30602- " movntq %%mm6, 48(%1)\n"
30603- " movq 56(%0), %%mm7\n"
30604- " movntq %%mm7, 56(%1)\n"
30605+ "1: prefetch 320(%1)\n"
30606+ "2: movq (%1), %%mm0\n"
30607+ " movntq %%mm0, (%2)\n"
30608+ " movq 8(%1), %%mm1\n"
30609+ " movntq %%mm1, 8(%2)\n"
30610+ " movq 16(%1), %%mm2\n"
30611+ " movntq %%mm2, 16(%2)\n"
30612+ " movq 24(%1), %%mm3\n"
30613+ " movntq %%mm3, 24(%2)\n"
30614+ " movq 32(%1), %%mm4\n"
30615+ " movntq %%mm4, 32(%2)\n"
30616+ " movq 40(%1), %%mm5\n"
30617+ " movntq %%mm5, 40(%2)\n"
30618+ " movq 48(%1), %%mm6\n"
30619+ " movntq %%mm6, 48(%2)\n"
30620+ " movq 56(%1), %%mm7\n"
30621+ " movntq %%mm7, 56(%2)\n"
30622 ".section .fixup, \"ax\"\n"
30623- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30624+ "3:\n"
30625+
30626+#ifdef CONFIG_PAX_KERNEXEC
30627+ " movl %%cr0, %0\n"
30628+ " movl %0, %%eax\n"
30629+ " andl $0xFFFEFFFF, %%eax\n"
30630+ " movl %%eax, %%cr0\n"
30631+#endif
30632+
30633+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30634+
30635+#ifdef CONFIG_PAX_KERNEXEC
30636+ " movl %0, %%cr0\n"
30637+#endif
30638+
30639 " jmp 2b\n"
30640 ".previous\n"
30641- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30642+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30643
30644 from += 64;
30645 to += 64;
30646@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30647 static void fast_copy_page(void *to, void *from)
30648 {
30649 int i;
30650+ unsigned long cr0;
30651
30652 kernel_fpu_begin();
30653
30654 __asm__ __volatile__ (
30655- "1: prefetch (%0)\n"
30656- " prefetch 64(%0)\n"
30657- " prefetch 128(%0)\n"
30658- " prefetch 192(%0)\n"
30659- " prefetch 256(%0)\n"
30660+ "1: prefetch (%1)\n"
30661+ " prefetch 64(%1)\n"
30662+ " prefetch 128(%1)\n"
30663+ " prefetch 192(%1)\n"
30664+ " prefetch 256(%1)\n"
30665 "2: \n"
30666 ".section .fixup, \"ax\"\n"
30667- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30668+ "3: \n"
30669+
30670+#ifdef CONFIG_PAX_KERNEXEC
30671+ " movl %%cr0, %0\n"
30672+ " movl %0, %%eax\n"
30673+ " andl $0xFFFEFFFF, %%eax\n"
30674+ " movl %%eax, %%cr0\n"
30675+#endif
30676+
30677+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30678+
30679+#ifdef CONFIG_PAX_KERNEXEC
30680+ " movl %0, %%cr0\n"
30681+#endif
30682+
30683 " jmp 2b\n"
30684 ".previous\n"
30685- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30686+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30687
30688 for (i = 0; i < 4096/64; i++) {
30689 __asm__ __volatile__ (
30690- "1: prefetch 320(%0)\n"
30691- "2: movq (%0), %%mm0\n"
30692- " movq 8(%0), %%mm1\n"
30693- " movq 16(%0), %%mm2\n"
30694- " movq 24(%0), %%mm3\n"
30695- " movq %%mm0, (%1)\n"
30696- " movq %%mm1, 8(%1)\n"
30697- " movq %%mm2, 16(%1)\n"
30698- " movq %%mm3, 24(%1)\n"
30699- " movq 32(%0), %%mm0\n"
30700- " movq 40(%0), %%mm1\n"
30701- " movq 48(%0), %%mm2\n"
30702- " movq 56(%0), %%mm3\n"
30703- " movq %%mm0, 32(%1)\n"
30704- " movq %%mm1, 40(%1)\n"
30705- " movq %%mm2, 48(%1)\n"
30706- " movq %%mm3, 56(%1)\n"
30707+ "1: prefetch 320(%1)\n"
30708+ "2: movq (%1), %%mm0\n"
30709+ " movq 8(%1), %%mm1\n"
30710+ " movq 16(%1), %%mm2\n"
30711+ " movq 24(%1), %%mm3\n"
30712+ " movq %%mm0, (%2)\n"
30713+ " movq %%mm1, 8(%2)\n"
30714+ " movq %%mm2, 16(%2)\n"
30715+ " movq %%mm3, 24(%2)\n"
30716+ " movq 32(%1), %%mm0\n"
30717+ " movq 40(%1), %%mm1\n"
30718+ " movq 48(%1), %%mm2\n"
30719+ " movq 56(%1), %%mm3\n"
30720+ " movq %%mm0, 32(%2)\n"
30721+ " movq %%mm1, 40(%2)\n"
30722+ " movq %%mm2, 48(%2)\n"
30723+ " movq %%mm3, 56(%2)\n"
30724 ".section .fixup, \"ax\"\n"
30725- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30726+ "3:\n"
30727+
30728+#ifdef CONFIG_PAX_KERNEXEC
30729+ " movl %%cr0, %0\n"
30730+ " movl %0, %%eax\n"
30731+ " andl $0xFFFEFFFF, %%eax\n"
30732+ " movl %%eax, %%cr0\n"
30733+#endif
30734+
30735+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30736+
30737+#ifdef CONFIG_PAX_KERNEXEC
30738+ " movl %0, %%cr0\n"
30739+#endif
30740+
30741 " jmp 2b\n"
30742 ".previous\n"
30743 _ASM_EXTABLE(1b, 3b)
30744- : : "r" (from), "r" (to) : "memory");
30745+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30746
30747 from += 64;
30748 to += 64;
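The mmx_32.c hunks above all share one pattern: the .fixup code self-patches the faulting prefetch with "movw $0x05EB, 1b" (the word 0x05EB is the byte pair EB 05, a short "jmp +5"), and because KERNEXEC keeps kernel text read-only, the write is bracketed by clearing and restoring CR0.WP. A minimal C sketch of that bracket follows; the helper names are illustrative, not the patch's API, and the real code keeps the whole sequence inside a single asm block so nothing can run between the two CR0 writes.

	static inline unsigned long read_cr0_sketch(void)
	{
		unsigned long cr0;

		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		return cr0;
	}

	static inline void write_cr0_sketch(unsigned long cr0)
	{
		asm volatile("mov %0, %%cr0" : : "r" (cr0));
	}

	/* Patch one 16-bit instruction word in read-only kernel text:
	 * 0x05EB is 'EB 05' in memory, a short 'jmp +5' that skips the
	 * faulting prefetch -- exactly what the fixup above writes to 1b. */
	static void patch_text_word_sketch(unsigned short *insn, unsigned short val)
	{
		unsigned long cr0 = read_cr0_sketch();

		write_cr0_sketch(cr0 & ~(1UL << 16));	/* clear CR0.WP */
		*insn = val;
		write_cr0_sketch(cr0);			/* restore CR0.WP */
	}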
30749diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30750index f6d13ee..d789440 100644
30751--- a/arch/x86/lib/msr-reg.S
30752+++ b/arch/x86/lib/msr-reg.S
30753@@ -3,6 +3,7 @@
30754 #include <asm/dwarf2.h>
30755 #include <asm/asm.h>
30756 #include <asm/msr.h>
30757+#include <asm/alternative-asm.h>
30758
30759 #ifdef CONFIG_X86_64
30760 /*
30761@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30762 movl %edi, 28(%r10)
30763 popq_cfi %rbp
30764 popq_cfi %rbx
30765+ pax_force_retaddr
30766 ret
30767 3:
30768 CFI_RESTORE_STATE
30769diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30770index fc6ba17..d4d989d 100644
30771--- a/arch/x86/lib/putuser.S
30772+++ b/arch/x86/lib/putuser.S
30773@@ -16,7 +16,9 @@
30774 #include <asm/errno.h>
30775 #include <asm/asm.h>
30776 #include <asm/smap.h>
30777-
30778+#include <asm/segment.h>
30779+#include <asm/pgtable.h>
30780+#include <asm/alternative-asm.h>
30781
30782 /*
30783 * __put_user_X
30784@@ -30,57 +32,125 @@
30785 * as they get called from within inline assembly.
30786 */
30787
30788-#define ENTER CFI_STARTPROC ; \
30789- GET_THREAD_INFO(%_ASM_BX)
30790-#define EXIT ASM_CLAC ; \
30791- ret ; \
30792+#define ENTER CFI_STARTPROC
30793+#define EXIT ASM_CLAC ; \
30794+ pax_force_retaddr ; \
30795+ ret ; \
30796 CFI_ENDPROC
30797
30798+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30799+#define _DEST %_ASM_CX,%_ASM_BX
30800+#else
30801+#define _DEST %_ASM_CX
30802+#endif
30803+
30804+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30805+#define __copyuser_seg gs;
30806+#else
30807+#define __copyuser_seg
30808+#endif
30809+
30810 .text
30811 ENTRY(__put_user_1)
30812 ENTER
30813+
30814+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30815+ GET_THREAD_INFO(%_ASM_BX)
30816 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30817 jae bad_put_user
30818 ASM_STAC
30819-1: movb %al,(%_ASM_CX)
30820+
30821+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30822+ mov pax_user_shadow_base,%_ASM_BX
30823+ cmp %_ASM_BX,%_ASM_CX
30824+ jb 1234f
30825+ xor %ebx,%ebx
30826+1234:
30827+#endif
30828+
30829+#endif
30830+
30831+1: __copyuser_seg movb %al,(_DEST)
30832 xor %eax,%eax
30833 EXIT
30834 ENDPROC(__put_user_1)
30835
30836 ENTRY(__put_user_2)
30837 ENTER
30838+
30839+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30840+ GET_THREAD_INFO(%_ASM_BX)
30841 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30842 sub $1,%_ASM_BX
30843 cmp %_ASM_BX,%_ASM_CX
30844 jae bad_put_user
30845 ASM_STAC
30846-2: movw %ax,(%_ASM_CX)
30847+
30848+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30849+ mov pax_user_shadow_base,%_ASM_BX
30850+ cmp %_ASM_BX,%_ASM_CX
30851+ jb 1234f
30852+ xor %ebx,%ebx
30853+1234:
30854+#endif
30855+
30856+#endif
30857+
30858+2: __copyuser_seg movw %ax,(_DEST)
30859 xor %eax,%eax
30860 EXIT
30861 ENDPROC(__put_user_2)
30862
30863 ENTRY(__put_user_4)
30864 ENTER
30865+
30866+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30867+ GET_THREAD_INFO(%_ASM_BX)
30868 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30869 sub $3,%_ASM_BX
30870 cmp %_ASM_BX,%_ASM_CX
30871 jae bad_put_user
30872 ASM_STAC
30873-3: movl %eax,(%_ASM_CX)
30874+
30875+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30876+ mov pax_user_shadow_base,%_ASM_BX
30877+ cmp %_ASM_BX,%_ASM_CX
30878+ jb 1234f
30879+ xor %ebx,%ebx
30880+1234:
30881+#endif
30882+
30883+#endif
30884+
30885+3: __copyuser_seg movl %eax,(_DEST)
30886 xor %eax,%eax
30887 EXIT
30888 ENDPROC(__put_user_4)
30889
30890 ENTRY(__put_user_8)
30891 ENTER
30892+
30893+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30894+ GET_THREAD_INFO(%_ASM_BX)
30895 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30896 sub $7,%_ASM_BX
30897 cmp %_ASM_BX,%_ASM_CX
30898 jae bad_put_user
30899 ASM_STAC
30900-4: mov %_ASM_AX,(%_ASM_CX)
30901+
30902+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30903+ mov pax_user_shadow_base,%_ASM_BX
30904+ cmp %_ASM_BX,%_ASM_CX
30905+ jb 1234f
30906+ xor %ebx,%ebx
30907+1234:
30908+#endif
30909+
30910+#endif
30911+
30912+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30913 #ifdef CONFIG_X86_32
30914-5: movl %edx,4(%_ASM_CX)
30915+5: __copyuser_seg movl %edx,4(_DEST)
30916 #endif
30917 xor %eax,%eax
30918 EXIT
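The __put_user_N changes above follow one shape: on i386 with UDEREF the addr_limit test is dropped entirely (the %gs "user" segment selected by __copyuser_seg bounds the access instead), while on amd64 with UDEREF a base register is computed for the store. A sketch of that base computation; pax_user_shadow_base is the patch's own symbol, the helper name is illustrative:

	/* amd64 UDEREF: user pointers below pax_user_shadow_base are reached
	 * through the shadow area at uaddr + shadow_base; pointers at or
	 * above the base are used as-is (the asm zeroes %_ASM_BX then). */
	static inline unsigned long uderef_store_base_sketch(unsigned long uaddr,
							     unsigned long shadow_base)
	{
		return uaddr < shadow_base ? shadow_base : 0;
	}

The store itself then goes through (%_ASM_CX,%_ASM_BX), i.e. uaddr plus that base.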
30919diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
30920index 1cad221..de671ee 100644
30921--- a/arch/x86/lib/rwlock.S
30922+++ b/arch/x86/lib/rwlock.S
30923@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
30924 FRAME
30925 0: LOCK_PREFIX
30926 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30927+
30928+#ifdef CONFIG_PAX_REFCOUNT
30929+ jno 1234f
30930+ LOCK_PREFIX
30931+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30932+ int $4
30933+1234:
30934+ _ASM_EXTABLE(1234b, 1234b)
30935+#endif
30936+
30937 1: rep; nop
30938 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
30939 jne 1b
30940 LOCK_PREFIX
30941 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30942+
30943+#ifdef CONFIG_PAX_REFCOUNT
30944+ jno 1234f
30945+ LOCK_PREFIX
30946+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30947+ int $4
30948+1234:
30949+ _ASM_EXTABLE(1234b, 1234b)
30950+#endif
30951+
30952 jnz 0b
30953 ENDFRAME
30954+ pax_force_retaddr
30955 ret
30956 CFI_ENDPROC
30957 END(__write_lock_failed)
30958@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
30959 FRAME
30960 0: LOCK_PREFIX
30961 READ_LOCK_SIZE(inc) (%__lock_ptr)
30962+
30963+#ifdef CONFIG_PAX_REFCOUNT
30964+ jno 1234f
30965+ LOCK_PREFIX
30966+ READ_LOCK_SIZE(dec) (%__lock_ptr)
30967+ int $4
30968+1234:
30969+ _ASM_EXTABLE(1234b, 1234b)
30970+#endif
30971+
30972 1: rep; nop
30973 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
30974 js 1b
30975 LOCK_PREFIX
30976 READ_LOCK_SIZE(dec) (%__lock_ptr)
30977+
30978+#ifdef CONFIG_PAX_REFCOUNT
30979+ jno 1234f
30980+ LOCK_PREFIX
30981+ READ_LOCK_SIZE(inc) (%__lock_ptr)
30982+ int $4
30983+1234:
30984+ _ASM_EXTABLE(1234b, 1234b)
30985+#endif
30986+
30987 js 0b
30988 ENDFRAME
30989+ pax_force_retaddr
30990 ret
30991 CFI_ENDPROC
30992 END(__read_lock_failed)
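Each PAX_REFCOUNT hunk above instruments the locked operation the same way: "jno" falls through when the signed result did not overflow; otherwise the inverse locked operation rolls the counter back and "int $4" raises #OF, with _ASM_EXTABLE(1234b, 1234b) letting execution resume at the same label after the handler runs. A C-level sketch of the pattern, assuming kernel context (illustrative helper, not the patch's API):

	/* Overflow-checked increment in the PAX_REFCOUNT style used above:
	 * do the locked increment, and if the signed result overflowed
	 * (OF set), undo it and trap with 'int $4' (#OF). */
	static inline void atomic_inc_checked_sketch(int *v)
	{
		asm volatile("lock incl %0\n\t"
			     "jno 1f\n\t"
			     "lock decl %0\n\t"	/* roll back on signed overflow */
			     "int $4\n"		/* raise #OF for the handler */
			     "1:\n"
			     : "+m" (*v) : : "memory", "cc");
	}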
30993diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30994index 5dff5f0..cadebf4 100644
30995--- a/arch/x86/lib/rwsem.S
30996+++ b/arch/x86/lib/rwsem.S
30997@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30998 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30999 CFI_RESTORE __ASM_REG(dx)
31000 restore_common_regs
31001+ pax_force_retaddr
31002 ret
31003 CFI_ENDPROC
31004 ENDPROC(call_rwsem_down_read_failed)
31005@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31006 movq %rax,%rdi
31007 call rwsem_down_write_failed
31008 restore_common_regs
31009+ pax_force_retaddr
31010 ret
31011 CFI_ENDPROC
31012 ENDPROC(call_rwsem_down_write_failed)
31013@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31014 movq %rax,%rdi
31015 call rwsem_wake
31016 restore_common_regs
31017-1: ret
31018+1: pax_force_retaddr
31019+ ret
31020 CFI_ENDPROC
31021 ENDPROC(call_rwsem_wake)
31022
31023@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31024 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31025 CFI_RESTORE __ASM_REG(dx)
31026 restore_common_regs
31027+ pax_force_retaddr
31028 ret
31029 CFI_ENDPROC
31030 ENDPROC(call_rwsem_downgrade_wake)
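A sketch of what the recurring pax_force_retaddr annotation before each "ret" amounts to, assuming the KERNEXEC return-address masking described in the PaX design notes (the real macro is asm and has several variants; this models only the transformation):

	/* Set the top bit of the saved return address before 'ret', so a
	 * return target redirected into userland becomes a non-canonical
	 * amd64 address and faults instead of executing. A no-op for
	 * genuine kernel addresses, which already have the bit set. */
	static inline unsigned long force_kernel_retaddr_sketch(unsigned long ra)
	{
		return ra | (1UL << 63);
	}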
31031diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31032index a63efd6..8149fbe 100644
31033--- a/arch/x86/lib/thunk_64.S
31034+++ b/arch/x86/lib/thunk_64.S
31035@@ -8,6 +8,7 @@
31036 #include <linux/linkage.h>
31037 #include <asm/dwarf2.h>
31038 #include <asm/calling.h>
31039+#include <asm/alternative-asm.h>
31040
31041 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31042 .macro THUNK name, func, put_ret_addr_in_rdi=0
31043@@ -15,11 +16,11 @@
31044 \name:
31045 CFI_STARTPROC
31046
31047- /* this one pushes 9 elems, the next one would be %rIP */
31048- SAVE_ARGS
31049+ /* this one pushes 15+1 elems, the next one would be %rIP */
31050+ SAVE_ARGS 8
31051
31052 .if \put_ret_addr_in_rdi
31053- movq_cfi_restore 9*8, rdi
31054+ movq_cfi_restore RIP, rdi
31055 .endif
31056
31057 call \func
31058@@ -38,8 +39,9 @@
31059
31060 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31061 CFI_STARTPROC
31062- SAVE_ARGS
31063+ SAVE_ARGS 8
31064 restore:
31065- RESTORE_ARGS
31066+ RESTORE_ARGS 1,8
31067+ pax_force_retaddr
31068 ret
31069 CFI_ENDPROC
31070diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31071index e2f5e21..4b22130 100644
31072--- a/arch/x86/lib/usercopy_32.c
31073+++ b/arch/x86/lib/usercopy_32.c
31074@@ -42,11 +42,13 @@ do { \
31075 int __d0; \
31076 might_fault(); \
31077 __asm__ __volatile__( \
31078+ __COPYUSER_SET_ES \
31079 ASM_STAC "\n" \
31080 "0: rep; stosl\n" \
31081 " movl %2,%0\n" \
31082 "1: rep; stosb\n" \
31083 "2: " ASM_CLAC "\n" \
31084+ __COPYUSER_RESTORE_ES \
31085 ".section .fixup,\"ax\"\n" \
31086 "3: lea 0(%2,%0,4),%0\n" \
31087 " jmp 2b\n" \
31088@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31089
31090 #ifdef CONFIG_X86_INTEL_USERCOPY
31091 static unsigned long
31092-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31093+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31094 {
31095 int d0, d1;
31096 __asm__ __volatile__(
31097@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31098 " .align 2,0x90\n"
31099 "3: movl 0(%4), %%eax\n"
31100 "4: movl 4(%4), %%edx\n"
31101- "5: movl %%eax, 0(%3)\n"
31102- "6: movl %%edx, 4(%3)\n"
31103+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31104+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31105 "7: movl 8(%4), %%eax\n"
31106 "8: movl 12(%4),%%edx\n"
31107- "9: movl %%eax, 8(%3)\n"
31108- "10: movl %%edx, 12(%3)\n"
31109+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31110+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31111 "11: movl 16(%4), %%eax\n"
31112 "12: movl 20(%4), %%edx\n"
31113- "13: movl %%eax, 16(%3)\n"
31114- "14: movl %%edx, 20(%3)\n"
31115+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31116+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31117 "15: movl 24(%4), %%eax\n"
31118 "16: movl 28(%4), %%edx\n"
31119- "17: movl %%eax, 24(%3)\n"
31120- "18: movl %%edx, 28(%3)\n"
31121+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31122+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31123 "19: movl 32(%4), %%eax\n"
31124 "20: movl 36(%4), %%edx\n"
31125- "21: movl %%eax, 32(%3)\n"
31126- "22: movl %%edx, 36(%3)\n"
31127+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31128+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31129 "23: movl 40(%4), %%eax\n"
31130 "24: movl 44(%4), %%edx\n"
31131- "25: movl %%eax, 40(%3)\n"
31132- "26: movl %%edx, 44(%3)\n"
31133+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31134+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31135 "27: movl 48(%4), %%eax\n"
31136 "28: movl 52(%4), %%edx\n"
31137- "29: movl %%eax, 48(%3)\n"
31138- "30: movl %%edx, 52(%3)\n"
31139+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31140+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31141 "31: movl 56(%4), %%eax\n"
31142 "32: movl 60(%4), %%edx\n"
31143- "33: movl %%eax, 56(%3)\n"
31144- "34: movl %%edx, 60(%3)\n"
31145+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31146+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31147 " addl $-64, %0\n"
31148 " addl $64, %4\n"
31149 " addl $64, %3\n"
31150@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31151 " shrl $2, %0\n"
31152 " andl $3, %%eax\n"
31153 " cld\n"
31154+ __COPYUSER_SET_ES
31155 "99: rep; movsl\n"
31156 "36: movl %%eax, %0\n"
31157 "37: rep; movsb\n"
31158 "100:\n"
31159+ __COPYUSER_RESTORE_ES
31160+ ".section .fixup,\"ax\"\n"
31161+ "101: lea 0(%%eax,%0,4),%0\n"
31162+ " jmp 100b\n"
31163+ ".previous\n"
31164+ _ASM_EXTABLE(1b,100b)
31165+ _ASM_EXTABLE(2b,100b)
31166+ _ASM_EXTABLE(3b,100b)
31167+ _ASM_EXTABLE(4b,100b)
31168+ _ASM_EXTABLE(5b,100b)
31169+ _ASM_EXTABLE(6b,100b)
31170+ _ASM_EXTABLE(7b,100b)
31171+ _ASM_EXTABLE(8b,100b)
31172+ _ASM_EXTABLE(9b,100b)
31173+ _ASM_EXTABLE(10b,100b)
31174+ _ASM_EXTABLE(11b,100b)
31175+ _ASM_EXTABLE(12b,100b)
31176+ _ASM_EXTABLE(13b,100b)
31177+ _ASM_EXTABLE(14b,100b)
31178+ _ASM_EXTABLE(15b,100b)
31179+ _ASM_EXTABLE(16b,100b)
31180+ _ASM_EXTABLE(17b,100b)
31181+ _ASM_EXTABLE(18b,100b)
31182+ _ASM_EXTABLE(19b,100b)
31183+ _ASM_EXTABLE(20b,100b)
31184+ _ASM_EXTABLE(21b,100b)
31185+ _ASM_EXTABLE(22b,100b)
31186+ _ASM_EXTABLE(23b,100b)
31187+ _ASM_EXTABLE(24b,100b)
31188+ _ASM_EXTABLE(25b,100b)
31189+ _ASM_EXTABLE(26b,100b)
31190+ _ASM_EXTABLE(27b,100b)
31191+ _ASM_EXTABLE(28b,100b)
31192+ _ASM_EXTABLE(29b,100b)
31193+ _ASM_EXTABLE(30b,100b)
31194+ _ASM_EXTABLE(31b,100b)
31195+ _ASM_EXTABLE(32b,100b)
31196+ _ASM_EXTABLE(33b,100b)
31197+ _ASM_EXTABLE(34b,100b)
31198+ _ASM_EXTABLE(35b,100b)
31199+ _ASM_EXTABLE(36b,100b)
31200+ _ASM_EXTABLE(37b,100b)
31201+ _ASM_EXTABLE(99b,101b)
31202+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31203+ : "1"(to), "2"(from), "0"(size)
31204+ : "eax", "edx", "memory");
31205+ return size;
31206+}
31207+
31208+static unsigned long
31209+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31210+{
31211+ int d0, d1;
31212+ __asm__ __volatile__(
31213+ " .align 2,0x90\n"
31214+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31215+ " cmpl $67, %0\n"
31216+ " jbe 3f\n"
31217+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31218+ " .align 2,0x90\n"
31219+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31220+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31221+ "5: movl %%eax, 0(%3)\n"
31222+ "6: movl %%edx, 4(%3)\n"
31223+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31224+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31225+ "9: movl %%eax, 8(%3)\n"
31226+ "10: movl %%edx, 12(%3)\n"
31227+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31228+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31229+ "13: movl %%eax, 16(%3)\n"
31230+ "14: movl %%edx, 20(%3)\n"
31231+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31232+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31233+ "17: movl %%eax, 24(%3)\n"
31234+ "18: movl %%edx, 28(%3)\n"
31235+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31236+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31237+ "21: movl %%eax, 32(%3)\n"
31238+ "22: movl %%edx, 36(%3)\n"
31239+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31240+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31241+ "25: movl %%eax, 40(%3)\n"
31242+ "26: movl %%edx, 44(%3)\n"
31243+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31244+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31245+ "29: movl %%eax, 48(%3)\n"
31246+ "30: movl %%edx, 52(%3)\n"
31247+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31248+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31249+ "33: movl %%eax, 56(%3)\n"
31250+ "34: movl %%edx, 60(%3)\n"
31251+ " addl $-64, %0\n"
31252+ " addl $64, %4\n"
31253+ " addl $64, %3\n"
31254+ " cmpl $63, %0\n"
31255+ " ja 1b\n"
31256+ "35: movl %0, %%eax\n"
31257+ " shrl $2, %0\n"
31258+ " andl $3, %%eax\n"
31259+ " cld\n"
31260+ "99: rep; "__copyuser_seg" movsl\n"
31261+ "36: movl %%eax, %0\n"
31262+ "37: rep; "__copyuser_seg" movsb\n"
31263+ "100:\n"
31264 ".section .fixup,\"ax\"\n"
31265 "101: lea 0(%%eax,%0,4),%0\n"
31266 " jmp 100b\n"
31267@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31268 int d0, d1;
31269 __asm__ __volatile__(
31270 " .align 2,0x90\n"
31271- "0: movl 32(%4), %%eax\n"
31272+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31273 " cmpl $67, %0\n"
31274 " jbe 2f\n"
31275- "1: movl 64(%4), %%eax\n"
31276+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31277 " .align 2,0x90\n"
31278- "2: movl 0(%4), %%eax\n"
31279- "21: movl 4(%4), %%edx\n"
31280+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31281+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31282 " movl %%eax, 0(%3)\n"
31283 " movl %%edx, 4(%3)\n"
31284- "3: movl 8(%4), %%eax\n"
31285- "31: movl 12(%4),%%edx\n"
31286+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31287+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31288 " movl %%eax, 8(%3)\n"
31289 " movl %%edx, 12(%3)\n"
31290- "4: movl 16(%4), %%eax\n"
31291- "41: movl 20(%4), %%edx\n"
31292+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31293+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31294 " movl %%eax, 16(%3)\n"
31295 " movl %%edx, 20(%3)\n"
31296- "10: movl 24(%4), %%eax\n"
31297- "51: movl 28(%4), %%edx\n"
31298+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31299+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31300 " movl %%eax, 24(%3)\n"
31301 " movl %%edx, 28(%3)\n"
31302- "11: movl 32(%4), %%eax\n"
31303- "61: movl 36(%4), %%edx\n"
31304+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31305+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31306 " movl %%eax, 32(%3)\n"
31307 " movl %%edx, 36(%3)\n"
31308- "12: movl 40(%4), %%eax\n"
31309- "71: movl 44(%4), %%edx\n"
31310+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31311+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31312 " movl %%eax, 40(%3)\n"
31313 " movl %%edx, 44(%3)\n"
31314- "13: movl 48(%4), %%eax\n"
31315- "81: movl 52(%4), %%edx\n"
31316+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31317+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31318 " movl %%eax, 48(%3)\n"
31319 " movl %%edx, 52(%3)\n"
31320- "14: movl 56(%4), %%eax\n"
31321- "91: movl 60(%4), %%edx\n"
31322+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31323+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31324 " movl %%eax, 56(%3)\n"
31325 " movl %%edx, 60(%3)\n"
31326 " addl $-64, %0\n"
31327@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31328 " shrl $2, %0\n"
31329 " andl $3, %%eax\n"
31330 " cld\n"
31331- "6: rep; movsl\n"
31332+ "6: rep; "__copyuser_seg" movsl\n"
31333 " movl %%eax,%0\n"
31334- "7: rep; movsb\n"
31335+ "7: rep; "__copyuser_seg" movsb\n"
31336 "8:\n"
31337 ".section .fixup,\"ax\"\n"
31338 "9: lea 0(%%eax,%0,4),%0\n"
31339@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31340
31341 __asm__ __volatile__(
31342 " .align 2,0x90\n"
31343- "0: movl 32(%4), %%eax\n"
31344+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31345 " cmpl $67, %0\n"
31346 " jbe 2f\n"
31347- "1: movl 64(%4), %%eax\n"
31348+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31349 " .align 2,0x90\n"
31350- "2: movl 0(%4), %%eax\n"
31351- "21: movl 4(%4), %%edx\n"
31352+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31353+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31354 " movnti %%eax, 0(%3)\n"
31355 " movnti %%edx, 4(%3)\n"
31356- "3: movl 8(%4), %%eax\n"
31357- "31: movl 12(%4),%%edx\n"
31358+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31359+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31360 " movnti %%eax, 8(%3)\n"
31361 " movnti %%edx, 12(%3)\n"
31362- "4: movl 16(%4), %%eax\n"
31363- "41: movl 20(%4), %%edx\n"
31364+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31365+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31366 " movnti %%eax, 16(%3)\n"
31367 " movnti %%edx, 20(%3)\n"
31368- "10: movl 24(%4), %%eax\n"
31369- "51: movl 28(%4), %%edx\n"
31370+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31371+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31372 " movnti %%eax, 24(%3)\n"
31373 " movnti %%edx, 28(%3)\n"
31374- "11: movl 32(%4), %%eax\n"
31375- "61: movl 36(%4), %%edx\n"
31376+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31377+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31378 " movnti %%eax, 32(%3)\n"
31379 " movnti %%edx, 36(%3)\n"
31380- "12: movl 40(%4), %%eax\n"
31381- "71: movl 44(%4), %%edx\n"
31382+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31383+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31384 " movnti %%eax, 40(%3)\n"
31385 " movnti %%edx, 44(%3)\n"
31386- "13: movl 48(%4), %%eax\n"
31387- "81: movl 52(%4), %%edx\n"
31388+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31389+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31390 " movnti %%eax, 48(%3)\n"
31391 " movnti %%edx, 52(%3)\n"
31392- "14: movl 56(%4), %%eax\n"
31393- "91: movl 60(%4), %%edx\n"
31394+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31395+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31396 " movnti %%eax, 56(%3)\n"
31397 " movnti %%edx, 60(%3)\n"
31398 " addl $-64, %0\n"
31399@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31400 " shrl $2, %0\n"
31401 " andl $3, %%eax\n"
31402 " cld\n"
31403- "6: rep; movsl\n"
31404+ "6: rep; "__copyuser_seg" movsl\n"
31405 " movl %%eax,%0\n"
31406- "7: rep; movsb\n"
31407+ "7: rep; "__copyuser_seg" movsb\n"
31408 "8:\n"
31409 ".section .fixup,\"ax\"\n"
31410 "9: lea 0(%%eax,%0,4),%0\n"
31411@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31412
31413 __asm__ __volatile__(
31414 " .align 2,0x90\n"
31415- "0: movl 32(%4), %%eax\n"
31416+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31417 " cmpl $67, %0\n"
31418 " jbe 2f\n"
31419- "1: movl 64(%4), %%eax\n"
31420+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31421 " .align 2,0x90\n"
31422- "2: movl 0(%4), %%eax\n"
31423- "21: movl 4(%4), %%edx\n"
31424+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31425+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31426 " movnti %%eax, 0(%3)\n"
31427 " movnti %%edx, 4(%3)\n"
31428- "3: movl 8(%4), %%eax\n"
31429- "31: movl 12(%4),%%edx\n"
31430+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31431+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31432 " movnti %%eax, 8(%3)\n"
31433 " movnti %%edx, 12(%3)\n"
31434- "4: movl 16(%4), %%eax\n"
31435- "41: movl 20(%4), %%edx\n"
31436+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31437+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31438 " movnti %%eax, 16(%3)\n"
31439 " movnti %%edx, 20(%3)\n"
31440- "10: movl 24(%4), %%eax\n"
31441- "51: movl 28(%4), %%edx\n"
31442+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31443+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31444 " movnti %%eax, 24(%3)\n"
31445 " movnti %%edx, 28(%3)\n"
31446- "11: movl 32(%4), %%eax\n"
31447- "61: movl 36(%4), %%edx\n"
31448+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31449+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31450 " movnti %%eax, 32(%3)\n"
31451 " movnti %%edx, 36(%3)\n"
31452- "12: movl 40(%4), %%eax\n"
31453- "71: movl 44(%4), %%edx\n"
31454+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31455+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31456 " movnti %%eax, 40(%3)\n"
31457 " movnti %%edx, 44(%3)\n"
31458- "13: movl 48(%4), %%eax\n"
31459- "81: movl 52(%4), %%edx\n"
31460+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31461+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31462 " movnti %%eax, 48(%3)\n"
31463 " movnti %%edx, 52(%3)\n"
31464- "14: movl 56(%4), %%eax\n"
31465- "91: movl 60(%4), %%edx\n"
31466+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31467+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31468 " movnti %%eax, 56(%3)\n"
31469 " movnti %%edx, 60(%3)\n"
31470 " addl $-64, %0\n"
31471@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31472 " shrl $2, %0\n"
31473 " andl $3, %%eax\n"
31474 " cld\n"
31475- "6: rep; movsl\n"
31476+ "6: rep; "__copyuser_seg" movsl\n"
31477 " movl %%eax,%0\n"
31478- "7: rep; movsb\n"
31479+ "7: rep; "__copyuser_seg" movsb\n"
31480 "8:\n"
31481 ".section .fixup,\"ax\"\n"
31482 "9: lea 0(%%eax,%0,4),%0\n"
31483@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31484 */
31485 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31486 unsigned long size);
31487-unsigned long __copy_user_intel(void __user *to, const void *from,
31488+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31489+ unsigned long size);
31490+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31491 unsigned long size);
31492 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31493 const void __user *from, unsigned long size);
31494 #endif /* CONFIG_X86_INTEL_USERCOPY */
31495
31496 /* Generic arbitrary sized copy. */
31497-#define __copy_user(to, from, size) \
31498+#define __copy_user(to, from, size, prefix, set, restore) \
31499 do { \
31500 int __d0, __d1, __d2; \
31501 __asm__ __volatile__( \
31502+ set \
31503 " cmp $7,%0\n" \
31504 " jbe 1f\n" \
31505 " movl %1,%0\n" \
31506 " negl %0\n" \
31507 " andl $7,%0\n" \
31508 " subl %0,%3\n" \
31509- "4: rep; movsb\n" \
31510+ "4: rep; "prefix"movsb\n" \
31511 " movl %3,%0\n" \
31512 " shrl $2,%0\n" \
31513 " andl $3,%3\n" \
31514 " .align 2,0x90\n" \
31515- "0: rep; movsl\n" \
31516+ "0: rep; "prefix"movsl\n" \
31517 " movl %3,%0\n" \
31518- "1: rep; movsb\n" \
31519+ "1: rep; "prefix"movsb\n" \
31520 "2:\n" \
31521+ restore \
31522 ".section .fixup,\"ax\"\n" \
31523 "5: addl %3,%0\n" \
31524 " jmp 2b\n" \
31525@@ -538,14 +650,14 @@ do { \
31526 " negl %0\n" \
31527 " andl $7,%0\n" \
31528 " subl %0,%3\n" \
31529- "4: rep; movsb\n" \
31530+ "4: rep; "__copyuser_seg"movsb\n" \
31531 " movl %3,%0\n" \
31532 " shrl $2,%0\n" \
31533 " andl $3,%3\n" \
31534 " .align 2,0x90\n" \
31535- "0: rep; movsl\n" \
31536+ "0: rep; "__copyuser_seg"movsl\n" \
31537 " movl %3,%0\n" \
31538- "1: rep; movsb\n" \
31539+ "1: rep; "__copyuser_seg"movsb\n" \
31540 "2:\n" \
31541 ".section .fixup,\"ax\"\n" \
31542 "5: addl %3,%0\n" \
31543@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31544 {
31545 stac();
31546 if (movsl_is_ok(to, from, n))
31547- __copy_user(to, from, n);
31548+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31549 else
31550- n = __copy_user_intel(to, from, n);
31551+ n = __generic_copy_to_user_intel(to, from, n);
31552 clac();
31553 return n;
31554 }
31555@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31556 {
31557 stac();
31558 if (movsl_is_ok(to, from, n))
31559- __copy_user(to, from, n);
31560+ __copy_user(to, from, n, __copyuser_seg, "", "");
31561 else
31562- n = __copy_user_intel((void __user *)to,
31563- (const void *)from, n);
31564+ n = __generic_copy_from_user_intel(to, from, n);
31565 clac();
31566 return n;
31567 }
31568@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31569 if (n > 64 && cpu_has_xmm2)
31570 n = __copy_user_intel_nocache(to, from, n);
31571 else
31572- __copy_user(to, from, n);
31573+ __copy_user(to, from, n, __copyuser_seg, "", "");
31574 #else
31575- __copy_user(to, from, n);
31576+ __copy_user(to, from, n, __copyuser_seg, "", "");
31577 #endif
31578 clac();
31579 return n;
31580 }
31581 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31582
31583-/**
31584- * copy_to_user: - Copy a block of data into user space.
31585- * @to: Destination address, in user space.
31586- * @from: Source address, in kernel space.
31587- * @n: Number of bytes to copy.
31588- *
31589- * Context: User context only. This function may sleep.
31590- *
31591- * Copy data from kernel space to user space.
31592- *
31593- * Returns number of bytes that could not be copied.
31594- * On success, this will be zero.
31595- */
31596-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31597+#ifdef CONFIG_PAX_MEMORY_UDEREF
31598+void __set_fs(mm_segment_t x)
31599 {
31600- if (access_ok(VERIFY_WRITE, to, n))
31601- n = __copy_to_user(to, from, n);
31602- return n;
31603+ switch (x.seg) {
31604+ case 0:
31605+ loadsegment(gs, 0);
31606+ break;
31607+ case TASK_SIZE_MAX:
31608+ loadsegment(gs, __USER_DS);
31609+ break;
31610+ case -1UL:
31611+ loadsegment(gs, __KERNEL_DS);
31612+ break;
31613+ default:
31614+ BUG();
31615+ }
31616 }
31617-EXPORT_SYMBOL(_copy_to_user);
31618+EXPORT_SYMBOL(__set_fs);
31619
31620-/**
31621- * copy_from_user: - Copy a block of data from user space.
31622- * @to: Destination address, in kernel space.
31623- * @from: Source address, in user space.
31624- * @n: Number of bytes to copy.
31625- *
31626- * Context: User context only. This function may sleep.
31627- *
31628- * Copy data from user space to kernel space.
31629- *
31630- * Returns number of bytes that could not be copied.
31631- * On success, this will be zero.
31632- *
31633- * If some data could not be copied, this function will pad the copied
31634- * data to the requested size using zero bytes.
31635- */
31636-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31637+void set_fs(mm_segment_t x)
31638 {
31639- if (access_ok(VERIFY_READ, from, n))
31640- n = __copy_from_user(to, from, n);
31641- else
31642- memset(to, 0, n);
31643- return n;
31644+ current_thread_info()->addr_limit = x;
31645+ __set_fs(x);
31646 }
31647-EXPORT_SYMBOL(_copy_from_user);
31648+EXPORT_SYMBOL(set_fs);
31649+#endif
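With i386 UDEREF the userland window is reached through the %gs segment, so the reworked set_fs()/__set_fs() above must reload %gs whenever the address limit changes: a zero limit loads the null selector, TASK_SIZE_MAX loads __USER_DS, and KERNEL_DS (-1UL) loads __KERNEL_DS. A usage sketch with the stock 3.14 uaccess API, kernel context assumed:

	#include <linux/uaccess.h>

	/* Classic save/override/restore: with the UDEREF set_fs() above,
	 * switching the limit also reloads %gs, so the gs-prefixed copy
	 * helpers address the right segment for the duration. */
	static void kernel_read_sketch(void *dst, const void *src, size_t n)
	{
		mm_segment_t old_fs = get_fs();

		set_fs(KERNEL_DS);		/* loads __KERNEL_DS into %gs */
		(void)copy_from_user(dst, (const void __user *)src, n);
		set_fs(old_fs);			/* restore limit and segment */
	}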
31650diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31651index c905e89..01ab928 100644
31652--- a/arch/x86/lib/usercopy_64.c
31653+++ b/arch/x86/lib/usercopy_64.c
31654@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31655 might_fault();
31656 /* no memory constraint because it doesn't change any memory gcc knows
31657 about */
31658+ pax_open_userland();
31659 stac();
31660 asm volatile(
31661 " testq %[size8],%[size8]\n"
31662@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31663 _ASM_EXTABLE(0b,3b)
31664 _ASM_EXTABLE(1b,2b)
31665 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31666- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31667+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31668 [zero] "r" (0UL), [eight] "r" (8UL));
31669 clac();
31670+ pax_close_userland();
31671 return size;
31672 }
31673 EXPORT_SYMBOL(__clear_user);
31674@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31675 }
31676 EXPORT_SYMBOL(clear_user);
31677
31678-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31679+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31680 {
31681- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31682- return copy_user_generic((__force void *)to, (__force void *)from, len);
31683- }
31684- return len;
31685+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31686+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31687+ return len;
31688 }
31689 EXPORT_SYMBOL(copy_in_user);
31690
31691@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31692 * it is not necessary to optimize tail handling.
31693 */
31694 __visible unsigned long
31695-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31696+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31697 {
31698 char c;
31699 unsigned zero_len;
31700
31701+ clac();
31702+ pax_close_userland();
31703 for (; len; --len, to++) {
31704 if (__get_user_nocheck(c, from++, sizeof(char)))
31705 break;
31706@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31707 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31708 if (__put_user_nocheck(c, to++, sizeof(char)))
31709 break;
31710- clac();
31711 return len;
31712 }
31713diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31714index 6a19ad9..1c48f9a 100644
31715--- a/arch/x86/mm/Makefile
31716+++ b/arch/x86/mm/Makefile
31717@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31718 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31719
31720 obj-$(CONFIG_MEMTEST) += memtest.o
31721+
31722+quote:="
31723+obj-$(CONFIG_X86_64) += uderef_64.o
31724+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31725diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31726index 903ec1e..c4166b2 100644
31727--- a/arch/x86/mm/extable.c
31728+++ b/arch/x86/mm/extable.c
31729@@ -6,12 +6,24 @@
31730 static inline unsigned long
31731 ex_insn_addr(const struct exception_table_entry *x)
31732 {
31733- return (unsigned long)&x->insn + x->insn;
31734+ unsigned long reloc = 0;
31735+
31736+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31737+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31738+#endif
31739+
31740+ return (unsigned long)&x->insn + x->insn + reloc;
31741 }
31742 static inline unsigned long
31743 ex_fixup_addr(const struct exception_table_entry *x)
31744 {
31745- return (unsigned long)&x->fixup + x->fixup;
31746+ unsigned long reloc = 0;
31747+
31748+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31749+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31750+#endif
31751+
31752+ return (unsigned long)&x->fixup + x->fixup + reloc;
31753 }
31754
31755 int fixup_exception(struct pt_regs *regs)
31756@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31757 unsigned long new_ip;
31758
31759 #ifdef CONFIG_PNPBIOS
31760- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31761+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31762 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31763 extern u32 pnp_bios_is_utter_crap;
31764 pnp_bios_is_utter_crap = 1;
31765@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31766 i += 4;
31767 p->fixup -= i;
31768 i += 4;
31769+
31770+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31771+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31772+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31773+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31774+#endif
31775+
31776 }
31777 }
31778
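The extable.c change above accounts for KERNEXEC's i386 kernel relocation: entries store insn and fixup as 32-bit self-relative offsets, so decoding has to add the load-address delta (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR), which sort_extable() pre-subtracts from freshly built entries. A sketch of the decode; the struct and helper names are illustrative:

	struct extable_entry_sketch {
		int insn;	/* offset of faulting insn from &entry->insn */
		int fixup;	/* offset of fixup code from &entry->fixup */
	};

	/* Self-relative decode, plus the KERNEXEC relocation delta
	 * (zero on configurations that do not relocate the kernel). */
	static unsigned long ex_insn_addr_sketch(const struct extable_entry_sketch *x,
						 unsigned long reloc)
	{
		return (unsigned long)&x->insn + x->insn + reloc;
	}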
31779diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31780index a10c8c7..35a5abb 100644
31781--- a/arch/x86/mm/fault.c
31782+++ b/arch/x86/mm/fault.c
31783@@ -14,11 +14,18 @@
31784 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31785 #include <linux/prefetch.h> /* prefetchw */
31786 #include <linux/context_tracking.h> /* exception_enter(), ... */
31787+#include <linux/unistd.h>
31788+#include <linux/compiler.h>
31789
31790 #include <asm/traps.h> /* dotraplinkage, ... */
31791 #include <asm/pgalloc.h> /* pgd_*(), ... */
31792 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31793 #include <asm/fixmap.h> /* VSYSCALL_START */
31794+#include <asm/tlbflush.h>
31795+
31796+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31797+#include <asm/stacktrace.h>
31798+#endif
31799
31800 #define CREATE_TRACE_POINTS
31801 #include <asm/trace/exceptions.h>
31802@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs)
31803 int ret = 0;
31804
31805 /* kprobe_running() needs smp_processor_id() */
31806- if (kprobes_built_in() && !user_mode_vm(regs)) {
31807+ if (kprobes_built_in() && !user_mode(regs)) {
31808 preempt_disable();
31809 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31810 ret = 1;
31811@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31812 return !instr_lo || (instr_lo>>1) == 1;
31813 case 0x00:
31814 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31815- if (probe_kernel_address(instr, opcode))
31816+ if (user_mode(regs)) {
31817+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31818+ return 0;
31819+ } else if (probe_kernel_address(instr, opcode))
31820 return 0;
31821
31822 *prefetch = (instr_lo == 0xF) &&
31823@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31824 while (instr < max_instr) {
31825 unsigned char opcode;
31826
31827- if (probe_kernel_address(instr, opcode))
31828+ if (user_mode(regs)) {
31829+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31830+ break;
31831+ } else if (probe_kernel_address(instr, opcode))
31832 break;
31833
31834 instr++;
31835@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31836 force_sig_info(si_signo, &info, tsk);
31837 }
31838
31839+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31840+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31841+#endif
31842+
31843+#ifdef CONFIG_PAX_EMUTRAMP
31844+static int pax_handle_fetch_fault(struct pt_regs *regs);
31845+#endif
31846+
31847+#ifdef CONFIG_PAX_PAGEEXEC
31848+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31849+{
31850+ pgd_t *pgd;
31851+ pud_t *pud;
31852+ pmd_t *pmd;
31853+
31854+ pgd = pgd_offset(mm, address);
31855+ if (!pgd_present(*pgd))
31856+ return NULL;
31857+ pud = pud_offset(pgd, address);
31858+ if (!pud_present(*pud))
31859+ return NULL;
31860+ pmd = pmd_offset(pud, address);
31861+ if (!pmd_present(*pmd))
31862+ return NULL;
31863+ return pmd;
31864+}
31865+#endif
31866+
31867 DEFINE_SPINLOCK(pgd_lock);
31868 LIST_HEAD(pgd_list);
31869
31870@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31871 for (address = VMALLOC_START & PMD_MASK;
31872 address >= TASK_SIZE && address < FIXADDR_TOP;
31873 address += PMD_SIZE) {
31874+
31875+#ifdef CONFIG_PAX_PER_CPU_PGD
31876+ unsigned long cpu;
31877+#else
31878 struct page *page;
31879+#endif
31880
31881 spin_lock(&pgd_lock);
31882+
31883+#ifdef CONFIG_PAX_PER_CPU_PGD
31884+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31885+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31886+ pmd_t *ret;
31887+
31888+ ret = vmalloc_sync_one(pgd, address);
31889+ if (!ret)
31890+ break;
31891+ pgd = get_cpu_pgd(cpu, kernel);
31892+#else
31893 list_for_each_entry(page, &pgd_list, lru) {
31894+ pgd_t *pgd;
31895 spinlock_t *pgt_lock;
31896 pmd_t *ret;
31897
31898@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31899 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31900
31901 spin_lock(pgt_lock);
31902- ret = vmalloc_sync_one(page_address(page), address);
31903+ pgd = page_address(page);
31904+#endif
31905+
31906+ ret = vmalloc_sync_one(pgd, address);
31907+
31908+#ifndef CONFIG_PAX_PER_CPU_PGD
31909 spin_unlock(pgt_lock);
31910+#endif
31911
31912 if (!ret)
31913 break;
31914@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31915 * an interrupt in the middle of a task switch..
31916 */
31917 pgd_paddr = read_cr3();
31918+
31919+#ifdef CONFIG_PAX_PER_CPU_PGD
31920+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31921+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31922+#endif
31923+
31924 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31925 if (!pmd_k)
31926 return -1;
31927@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31928 * happen within a race in page table update. In the later
31929 * case just flush:
31930 */
31931- pgd = pgd_offset(current->active_mm, address);
31932+
31933 pgd_ref = pgd_offset_k(address);
31934 if (pgd_none(*pgd_ref))
31935 return -1;
31936
31937+#ifdef CONFIG_PAX_PER_CPU_PGD
31938+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31939+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31940+ if (pgd_none(*pgd)) {
31941+ set_pgd(pgd, *pgd_ref);
31942+ arch_flush_lazy_mmu_mode();
31943+ } else {
31944+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31945+ }
31946+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31947+#else
31948+ pgd = pgd_offset(current->active_mm, address);
31949+#endif
31950+
31951 if (pgd_none(*pgd)) {
31952 set_pgd(pgd, *pgd_ref);
31953 arch_flush_lazy_mmu_mode();
31954@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31955 static int is_errata100(struct pt_regs *regs, unsigned long address)
31956 {
31957 #ifdef CONFIG_X86_64
31958- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31959+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31960 return 1;
31961 #endif
31962 return 0;
31963@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31964 }
31965
31966 static const char nx_warning[] = KERN_CRIT
31967-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31968+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31969
31970 static void
31971 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31972@@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31973 if (!oops_may_print())
31974 return;
31975
31976- if (error_code & PF_INSTR) {
31977+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31978 unsigned int level;
31979
31980 pte_t *pte = lookup_address(address, &level);
31981
31982 if (pte && pte_present(*pte) && !pte_exec(*pte))
31983- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31984+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31985 }
31986
31987+#ifdef CONFIG_PAX_KERNEXEC
31988+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31989+ if (current->signal->curr_ip)
31990+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31991+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31992+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31993+ else
31994+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31995+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31996+ }
31997+#endif
31998+
31999 printk(KERN_ALERT "BUG: unable to handle kernel ");
32000 if (address < PAGE_SIZE)
32001 printk(KERN_CONT "NULL pointer dereference");
32002@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32003 return;
32004 }
32005 #endif
32006+
32007+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32008+ if (pax_is_fetch_fault(regs, error_code, address)) {
32009+
32010+#ifdef CONFIG_PAX_EMUTRAMP
32011+ switch (pax_handle_fetch_fault(regs)) {
32012+ case 2:
32013+ return;
32014+ }
32015+#endif
32016+
32017+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32018+ do_group_exit(SIGKILL);
32019+ }
32020+#endif
32021+
32022 /* Kernel addresses are always protection faults: */
32023 if (address >= TASK_SIZE)
32024 error_code |= PF_PROT;
32025@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32026 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32027 printk(KERN_ERR
32028 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32029- tsk->comm, tsk->pid, address);
32030+ tsk->comm, task_pid_nr(tsk), address);
32031 code = BUS_MCEERR_AR;
32032 }
32033 #endif
32034@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32035 return 1;
32036 }
32037
32038+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32039+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32040+{
32041+ pte_t *pte;
32042+ pmd_t *pmd;
32043+ spinlock_t *ptl;
32044+ unsigned char pte_mask;
32045+
32046+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32047+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32048+ return 0;
32049+
32050+ /* PaX: it's our fault, let's handle it if we can */
32051+
32052+ /* PaX: take a look at read faults before acquiring any locks */
32053+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32054+ /* instruction fetch attempt from a protected page in user mode */
32055+ up_read(&mm->mmap_sem);
32056+
32057+#ifdef CONFIG_PAX_EMUTRAMP
32058+ switch (pax_handle_fetch_fault(regs)) {
32059+ case 2:
32060+ return 1;
32061+ }
32062+#endif
32063+
32064+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32065+ do_group_exit(SIGKILL);
32066+ }
32067+
32068+ pmd = pax_get_pmd(mm, address);
32069+ if (unlikely(!pmd))
32070+ return 0;
32071+
32072+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32073+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32074+ pte_unmap_unlock(pte, ptl);
32075+ return 0;
32076+ }
32077+
32078+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32079+ /* write attempt to a protected page in user mode */
32080+ pte_unmap_unlock(pte, ptl);
32081+ return 0;
32082+ }
32083+
32084+#ifdef CONFIG_SMP
32085+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32086+#else
32087+ if (likely(address > get_limit(regs->cs)))
32088+#endif
32089+ {
32090+ set_pte(pte, pte_mkread(*pte));
32091+ __flush_tlb_one(address);
32092+ pte_unmap_unlock(pte, ptl);
32093+ up_read(&mm->mmap_sem);
32094+ return 1;
32095+ }
32096+
32097+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32098+
32099+ /*
32100+ * PaX: fill DTLB with user rights and retry
32101+ */
32102+ __asm__ __volatile__ (
32103+ "orb %2,(%1)\n"
32104+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32105+/*
32106+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32107+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32108+ * page fault when examined during a TLB load attempt. this is true not only
32109+ * for PTEs holding a non-present entry but also present entries that will
32110+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32111+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32112+ * for our target pages since their PTEs are simply not in the TLBs at all.
32113+
32114+ *
32115+ * fast path of the page fault handler and can get rid of tracing since we
32116+ * can no longer flush unintended entries.
32117+ */
32118+ "invlpg (%0)\n"
32119+#endif
32120+ __copyuser_seg"testb $0,(%0)\n"
32121+ "xorb %3,(%1)\n"
32122+ :
32123+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32124+ : "memory", "cc");
32125+ pte_unmap_unlock(pte, ptl);
32126+ up_read(&mm->mmap_sem);
32127+ return 1;
32128+}
32129+#endif
32130+
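The tail of pax_handle_pageexec_fault above is the DTLB-priming trick the long comment describes: grant user rights in the PTE, touch the page through the user segment so the CPU caches a user-rights data-TLB entry, then strip the bit again, with no TLB flush needed since PTEs that would fault are never cached. A C sketch with illustrative names:

	/* pte_mask is _PAGE_ACCESSED|_PAGE_USER, plus _PAGE_DIRTY on a
	 * write fault, matching the mask computed above. */
	static void prime_user_dtlb_sketch(unsigned char *pte_lsb,
					   const volatile unsigned char *uaddr,
					   unsigned char pte_mask)
	{
		*pte_lsb |= pte_mask;	/* grant user rights in the PTE */
		(void)*uaddr;		/* data access loads a user DTLB entry */
		*pte_lsb ^= 0x04;	/* toggle _PAGE_USER back off, keep A/D */
	}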
32131 /*
32132 * Handle a spurious fault caused by a stale TLB entry.
32133 *
32134@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
32135 static inline int
32136 access_error(unsigned long error_code, struct vm_area_struct *vma)
32137 {
32138+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32139+ return 1;
32140+
32141 if (error_code & PF_WRITE) {
32142 /* write, present and write, not present: */
32143 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32144@@ -1010,7 +1218,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32145 if (error_code & PF_USER)
32146 return false;
32147
32148- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32149+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32150 return false;
32151
32152 return true;
32153@@ -1038,6 +1246,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32154 tsk = current;
32155 mm = tsk->mm;
32156
32157+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32158+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32159+ if (!search_exception_tables(regs->ip)) {
32160+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32161+ bad_area_nosemaphore(regs, error_code, address);
32162+ return;
32163+ }
32164+ if (address < pax_user_shadow_base) {
32165+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32166+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32167+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32168+ } else
32169+ address -= pax_user_shadow_base;
32170+ }
32171+#endif
32172+
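The UDEREF block just added to __do_page_fault normalizes kernel-mode faults on shadowed user addresses back to the real user address before the usual handling runs (and reports anything below the shadow base as a bug). A sketch of the address mapping, assuming the same pax_user_shadow_base symbol:

	static unsigned long unshadow_fault_addr_sketch(unsigned long address,
							unsigned long shadow_base)
	{
		if (address >= shadow_base && address < 2 * shadow_base)
			return address - shadow_base;	/* shadowed user access */
		return address;		/* genuine kernel address, untouched */
	}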
32173 /*
32174 * Detect and handle instructions that would cause a page fault for
32175 * both a tracked kernel page and a userspace page.
32176@@ -1115,7 +1339,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32177 * User-mode registers count as a user access even for any
32178 * potential system fault or CPU buglet:
32179 */
32180- if (user_mode_vm(regs)) {
32181+ if (user_mode(regs)) {
32182 local_irq_enable();
32183 error_code |= PF_USER;
32184 flags |= FAULT_FLAG_USER;
32185@@ -1162,6 +1386,11 @@ retry:
32186 might_sleep();
32187 }
32188
32189+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32190+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32191+ return;
32192+#endif
32193+
32194 vma = find_vma(mm, address);
32195 if (unlikely(!vma)) {
32196 bad_area(regs, error_code, address);
32197@@ -1173,18 +1402,24 @@ retry:
32198 bad_area(regs, error_code, address);
32199 return;
32200 }
32201- if (error_code & PF_USER) {
32202- /*
32203- * Accessing the stack below %sp is always a bug.
32204- * The large cushion allows instructions like enter
32205- * and pusha to work. ("enter $65535, $31" pushes
32206- * 32 pointers and then decrements %sp by 65535.)
32207- */
32208- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32209- bad_area(regs, error_code, address);
32210- return;
32211- }
32212+ /*
32213+ * Accessing the stack below %sp is always a bug.
32214+ * The large cushion allows instructions like enter
32215+ * and pusha to work. ("enter $65535, $31" pushes
32216+ * 32 pointers and then decrements %sp by 65535.)
32217+ */
32218+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32219+ bad_area(regs, error_code, address);
32220+ return;
32221 }
32222+
32223+#ifdef CONFIG_PAX_SEGMEXEC
32224+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32225+ bad_area(regs, error_code, address);
32226+ return;
32227+ }
32228+#endif
32229+
32230 if (unlikely(expand_stack(vma, address))) {
32231 bad_area(regs, error_code, address);
32232 return;
32233@@ -1296,3 +1531,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32234 exception_exit(prev_state);
32235 }
32236 #endif /* CONFIG_TRACING */
32237+
32238+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32239+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32240+{
32241+ struct mm_struct *mm = current->mm;
32242+ unsigned long ip = regs->ip;
32243+
32244+ if (v8086_mode(regs))
32245+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32246+
32247+#ifdef CONFIG_PAX_PAGEEXEC
32248+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32249+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32250+ return true;
32251+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32252+ return true;
32253+ return false;
32254+ }
32255+#endif
32256+
32257+#ifdef CONFIG_PAX_SEGMEXEC
32258+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32259+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32260+ return true;
32261+ return false;
32262+ }
32263+#endif
32264+
32265+ return false;
32266+}
32267+#endif
32268+
32269+#ifdef CONFIG_PAX_EMUTRAMP
32270+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32271+{
32272+ int err;
32273+
32274+ do { /* PaX: libffi trampoline emulation */
32275+ unsigned char mov, jmp;
32276+ unsigned int addr1, addr2;
32277+
32278+#ifdef CONFIG_X86_64
32279+ if ((regs->ip + 9) >> 32)
32280+ break;
32281+#endif
32282+
32283+ err = get_user(mov, (unsigned char __user *)regs->ip);
32284+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32285+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32286+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32287+
32288+ if (err)
32289+ break;
32290+
32291+ if (mov == 0xB8 && jmp == 0xE9) {
32292+ regs->ax = addr1;
32293+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32294+ return 2;
32295+ }
32296+ } while (0);
32297+
32298+ do { /* PaX: gcc trampoline emulation #1 */
32299+ unsigned char mov1, mov2;
32300+ unsigned short jmp;
32301+ unsigned int addr1, addr2;
32302+
32303+#ifdef CONFIG_X86_64
32304+ if ((regs->ip + 11) >> 32)
32305+ break;
32306+#endif
32307+
32308+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32309+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32310+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32311+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32312+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32313+
32314+ if (err)
32315+ break;
32316+
32317+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32318+ regs->cx = addr1;
32319+ regs->ax = addr2;
32320+ regs->ip = addr2;
32321+ return 2;
32322+ }
32323+ } while (0);
32324+
32325+ do { /* PaX: gcc trampoline emulation #2 */
32326+ unsigned char mov, jmp;
32327+ unsigned int addr1, addr2;
32328+
32329+#ifdef CONFIG_X86_64
32330+ if ((regs->ip + 9) >> 32)
32331+ break;
32332+#endif
32333+
32334+ err = get_user(mov, (unsigned char __user *)regs->ip);
32335+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32336+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32337+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32338+
32339+ if (err)
32340+ break;
32341+
32342+ if (mov == 0xB9 && jmp == 0xE9) {
32343+ regs->cx = addr1;
32344+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32345+ return 2;
32346+ }
32347+ } while (0);
32348+
32349+ return 1; /* PaX in action */
32350+}
32351+
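pax_handle_fetch_fault_32 above works by byte-matching the trampolines gcc and libffi emit for nested functions and closures, then performing the register loads and the control transfer itself rather than executing from a non-executable stack page. The layout matched by "gcc trampoline emulation #1", as a sketch (struct name illustrative):

	/* 'mov $imm32,%ecx; mov $imm32,%eax; jmp *%eax' -- on a match the
	 * handler sets regs->cx/regs->ax and regs->ip directly. */
	struct gcc_tramp32_sketch {
		unsigned char  mov1;	/* 0xB9: mov imm32, %ecx */
		unsigned int   addr1;
		unsigned char  mov2;	/* 0xB8: mov imm32, %eax */
		unsigned int   addr2;
		unsigned short jmp;	/* bytes FF E0: jmp *%eax (0xE0FF as LE word) */
	} __attribute__((packed));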
32352+#ifdef CONFIG_X86_64
32353+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32354+{
32355+ int err;
32356+
32357+ do { /* PaX: libffi trampoline emulation */
32358+ unsigned short mov1, mov2, jmp1;
32359+ unsigned char stcclc, jmp2;
32360+ unsigned long addr1, addr2;
32361+
32362+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32363+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32364+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32365+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32366+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32367+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32368+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32369+
32370+ if (err)
32371+ break;
32372+
32373+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32374+ regs->r11 = addr1;
32375+ regs->r10 = addr2;
32376+ if (stcclc == 0xF8)
32377+ regs->flags &= ~X86_EFLAGS_CF;
32378+ else
32379+ regs->flags |= X86_EFLAGS_CF;
32380+ regs->ip = addr1;
32381+ return 2;
32382+ }
32383+ } while (0);
32384+
32385+ do { /* PaX: gcc trampoline emulation #1 */
32386+ unsigned short mov1, mov2, jmp1;
32387+ unsigned char jmp2;
32388+ unsigned int addr1;
32389+ unsigned long addr2;
32390+
32391+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32392+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32393+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32394+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32395+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32396+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32397+
32398+ if (err)
32399+ break;
32400+
32401+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32402+ regs->r11 = addr1;
32403+ regs->r10 = addr2;
32404+ regs->ip = addr1;
32405+ return 2;
32406+ }
32407+ } while (0);
32408+
32409+ do { /* PaX: gcc trampoline emulation #2 */
32410+ unsigned short mov1, mov2, jmp1;
32411+ unsigned char jmp2;
32412+ unsigned long addr1, addr2;
32413+
32414+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32415+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32416+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32417+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32418+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32419+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32420+
32421+ if (err)
32422+ break;
32423+
32424+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32425+ regs->r11 = addr1;
32426+ regs->r10 = addr2;
32427+ regs->ip = addr1;
32428+ return 2;
32429+ }
32430+ } while (0);
32431+
32432+ return 1; /* PaX in action */
32433+}
32434+#endif
32435+
32436+/*
32437+ * PaX: decide what to do with offenders (regs->ip = fault address)
32438+ *
32439+ * returns 1 when task should be killed
32440+ * 2 when gcc trampoline was detected
32441+ */
32442+static int pax_handle_fetch_fault(struct pt_regs *regs)
32443+{
32444+ if (v8086_mode(regs))
32445+ return 1;
32446+
32447+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32448+ return 1;
32449+
32450+#ifdef CONFIG_X86_32
32451+ return pax_handle_fetch_fault_32(regs);
32452+#else
32453+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32454+ return pax_handle_fetch_fault_32(regs);
32455+ else
32456+ return pax_handle_fetch_fault_64(regs);
32457+#endif
32458+}
32459+#endif
32460+
32461+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32462+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32463+{
32464+ long i;
32465+
32466+ printk(KERN_ERR "PAX: bytes at PC: ");
32467+ for (i = 0; i < 20; i++) {
32468+ unsigned char c;
32469+ if (get_user(c, (unsigned char __force_user *)pc+i))
32470+ printk(KERN_CONT "?? ");
32471+ else
32472+ printk(KERN_CONT "%02x ", c);
32473+ }
32474+ printk("\n");
32475+
32476+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32477+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32478+ unsigned long c;
32479+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32480+#ifdef CONFIG_X86_32
32481+ printk(KERN_CONT "???????? ");
32482+#else
32483+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32484+ printk(KERN_CONT "???????? ???????? ");
32485+ else
32486+ printk(KERN_CONT "???????????????? ");
32487+#endif
32488+ } else {
32489+#ifdef CONFIG_X86_64
32490+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32491+ printk(KERN_CONT "%08x ", (unsigned int)c);
32492+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32493+ } else
32494+#endif
32495+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32496+ }
32497+ }
32498+ printk("\n");
32499+}
32500+#endif
32501+
32502+/**
32503+ * probe_kernel_write(): safely attempt to write to a location
32504+ * @dst: address to write to
32505+ * @src: pointer to the data that shall be written
32506+ * @size: size of the data chunk
32507+ *
32508+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32509+ * happens, handle that and return -EFAULT.
32510+ */
32511+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32512+{
32513+ long ret;
32514+ mm_segment_t old_fs = get_fs();
32515+
32516+ set_fs(KERNEL_DS);
32517+ pagefault_disable();
32518+ pax_open_kernel();
32519+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32520+ pax_close_kernel();
32521+ pagefault_enable();
32522+ set_fs(old_fs);
32523+
32524+ return ret ? -EFAULT : 0;
32525+}
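
For reference, the 32-bit libffi case above matches the 10-byte sequence B8 <imm32> E9 <rel32> ("mov $imm32,%eax; jmp rel32") and resumes at the jump target with %eax preloaded; the gcc variants differ only in the register loaded (0xB9 targets %ecx) and, in case #1, in ending with the bytes FF E0 ("jmp *%eax") instead of a relative jump. A minimal user-space model of the decode (the helper and struct names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cpu_model { uint32_t eax, eip; };

/* emulate "mov $imm32,%eax; jmp rel32" if buf[0..9] holds that pattern */
static int emulate_libffi_tramp(const uint8_t *buf, struct cpu_model *cpu)
{
	uint32_t imm, rel;

	if (buf[0] != 0xB8 || buf[5] != 0xE9)
		return 0;
	memcpy(&imm, buf + 1, 4);	/* little-endian immediate */
	memcpy(&rel, buf + 6, 4);	/* little-endian displacement */
	cpu->eax = imm;
	cpu->eip = cpu->eip + 10 + rel;	/* rel32 counts from the insn end */
	return 1;
}

int main(void)
{
	/* mov $0x11223344,%eax; jmp rel32 with rel32 = 0x0b */
	const uint8_t tramp[10] = { 0xB8, 0x44, 0x33, 0x22, 0x11,
				    0xE9, 0x0B, 0x00, 0x00, 0x00 };
	struct cpu_model cpu = { 0, 0x1000 };

	if (emulate_libffi_tramp(tramp, &cpu))
		printf("eax=%#x eip=%#x\n", cpu.eax, cpu.eip); /* 0x11223344, 0x1015 */
	return 0;
}

The "+ 10" mirrors the kernel code: the faulting regs->ip points at the start of the trampoline, and the two instructions together occupy 10 bytes.
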
32526diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32527index 207d9aef..69030980 100644
32528--- a/arch/x86/mm/gup.c
32529+++ b/arch/x86/mm/gup.c
32530@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32531 addr = start;
32532 len = (unsigned long) nr_pages << PAGE_SHIFT;
32533 end = start + len;
32534- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32535+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32536 (void __user *)start, len)))
32537 return 0;
32538
32539@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32540 goto slow_irqon;
32541 #endif
32542
32543+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32544+ (void __user *)start, len)))
32545+ return 0;
32546+
32547 /*
32548 * XXX: batch / limit 'nr', to avoid large irq off latency
32549 * needs some instrumenting to determine the common sizes used by
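
The check added to get_user_pages_fast() is the usual overflow-safe range test: a start/length pair is accepted only if the length fits under the user-space ceiling and start + len cannot wrap. The "_noprefault" suffix suggests the variant skips the prefaulting grsecurity otherwise folds into access_ok(); only the range arithmetic is modeled in this stand-alone sketch (limit stands in for the architectural user address ceiling):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool user_range_ok(uintptr_t start, size_t len, uintptr_t limit)
{
	if (len > limit)
		return false;		/* longer than the whole user range */
	return start <= limit - len;	/* implies start + len <= limit, no wrap */
}
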
32550diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32551index 4500142..53a363c 100644
32552--- a/arch/x86/mm/highmem_32.c
32553+++ b/arch/x86/mm/highmem_32.c
32554@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32555 idx = type + KM_TYPE_NR*smp_processor_id();
32556 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32557 BUG_ON(!pte_none(*(kmap_pte-idx)));
32558+
32559+ pax_open_kernel();
32560 set_pte(kmap_pte-idx, mk_pte(page, prot));
32561+ pax_close_kernel();
32562+
32563 arch_flush_lazy_mmu_mode();
32564
32565 return (void *)vaddr;
32566diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32567index 8c9f647..57cb402 100644
32568--- a/arch/x86/mm/hugetlbpage.c
32569+++ b/arch/x86/mm/hugetlbpage.c
32570@@ -90,23 +90,24 @@ int pmd_huge_support(void)
32571 #ifdef CONFIG_HUGETLB_PAGE
32572 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32573 unsigned long addr, unsigned long len,
32574- unsigned long pgoff, unsigned long flags)
32575+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32576 {
32577 struct hstate *h = hstate_file(file);
32578 struct vm_unmapped_area_info info;
32579-
32580+
32581 info.flags = 0;
32582 info.length = len;
32583 info.low_limit = current->mm->mmap_legacy_base;
32584 info.high_limit = TASK_SIZE;
32585 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32586 info.align_offset = 0;
32587+ info.threadstack_offset = offset;
32588 return vm_unmapped_area(&info);
32589 }
32590
32591 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32592 unsigned long addr0, unsigned long len,
32593- unsigned long pgoff, unsigned long flags)
32594+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32595 {
32596 struct hstate *h = hstate_file(file);
32597 struct vm_unmapped_area_info info;
32598@@ -118,6 +119,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32599 info.high_limit = current->mm->mmap_base;
32600 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32601 info.align_offset = 0;
32602+ info.threadstack_offset = offset;
32603 addr = vm_unmapped_area(&info);
32604
32605 /*
32606@@ -130,6 +132,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32607 VM_BUG_ON(addr != -ENOMEM);
32608 info.flags = 0;
32609 info.low_limit = TASK_UNMAPPED_BASE;
32610+
32611+#ifdef CONFIG_PAX_RANDMMAP
32612+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32613+ info.low_limit += current->mm->delta_mmap;
32614+#endif
32615+
32616 info.high_limit = TASK_SIZE;
32617 addr = vm_unmapped_area(&info);
32618 }
32619@@ -144,10 +152,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32620 struct hstate *h = hstate_file(file);
32621 struct mm_struct *mm = current->mm;
32622 struct vm_area_struct *vma;
32623+ unsigned long pax_task_size = TASK_SIZE;
32624+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32625
32626 if (len & ~huge_page_mask(h))
32627 return -EINVAL;
32628- if (len > TASK_SIZE)
32629+
32630+#ifdef CONFIG_PAX_SEGMEXEC
32631+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32632+ pax_task_size = SEGMEXEC_TASK_SIZE;
32633+#endif
32634+
32635+ pax_task_size -= PAGE_SIZE;
32636+
32637+ if (len > pax_task_size)
32638 return -ENOMEM;
32639
32640 if (flags & MAP_FIXED) {
32641@@ -156,19 +174,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32642 return addr;
32643 }
32644
32645+#ifdef CONFIG_PAX_RANDMMAP
32646+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32647+#endif
32648+
32649 if (addr) {
32650 addr = ALIGN(addr, huge_page_size(h));
32651 vma = find_vma(mm, addr);
32652- if (TASK_SIZE - len >= addr &&
32653- (!vma || addr + len <= vma->vm_start))
32654+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32655 return addr;
32656 }
32657 if (mm->get_unmapped_area == arch_get_unmapped_area)
32658 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32659- pgoff, flags);
32660+ pgoff, flags, offset);
32661 else
32662 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32663- pgoff, flags);
32664+ pgoff, flags, offset);
32665 }
32666 #endif /* CONFIG_HUGETLB_PAGE */
32667
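
check_heap_stack_gap() generalizes the plain "addr + len <= vma->vm_start" test it replaces: a candidate range is accepted only if it also leaves a guard gap below the next mapping, taking the randomized thread-stack offset passed in as "offset" into account. A simplified user-space model of the interval part (struct and helper names are illustrative; the real helper additionally special-cases stack VMAs):

#include <stdbool.h>

struct vma_model { unsigned long vm_start, vm_end; };

/* accept addr..addr+len only if a guard gap remains below the next vma */
static bool gap_ok(const struct vma_model *next, unsigned long addr,
		   unsigned long len, unsigned long gap)
{
	if (!next)
		return true;			/* nothing mapped above */
	if (addr + len > next->vm_start)
		return false;			/* overlaps the next mapping */
	return next->vm_start - (addr + len) >= gap;
}
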
32668diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32669index f971306..e83e0f6 100644
32670--- a/arch/x86/mm/init.c
32671+++ b/arch/x86/mm/init.c
32672@@ -4,6 +4,7 @@
32673 #include <linux/swap.h>
32674 #include <linux/memblock.h>
32675 #include <linux/bootmem.h> /* for max_low_pfn */
32676+#include <linux/tboot.h>
32677
32678 #include <asm/cacheflush.h>
32679 #include <asm/e820.h>
32680@@ -17,6 +18,8 @@
32681 #include <asm/proto.h>
32682 #include <asm/dma.h> /* for MAX_DMA_PFN */
32683 #include <asm/microcode.h>
32684+#include <asm/desc.h>
32685+#include <asm/bios_ebda.h>
32686
32687 #include "mm_internal.h"
32688
32689@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32690 early_ioremap_page_table_range_init();
32691 #endif
32692
32693+#ifdef CONFIG_PAX_PER_CPU_PGD
32694+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32695+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32696+ KERNEL_PGD_PTRS);
32697+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32698+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32699+ KERNEL_PGD_PTRS);
32700+ load_cr3(get_cpu_pgd(0, kernel));
32701+#else
32702 load_cr3(swapper_pg_dir);
32703+#endif
32704+
32705 __flush_tlb_all();
32706
32707 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32708@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32709 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32710 * mmio resources as well as potential bios/acpi data regions.
32711 */
32712+
32713+#ifdef CONFIG_GRKERNSEC_KMEM
32714+static unsigned int ebda_start __read_only;
32715+static unsigned int ebda_end __read_only;
32716+#endif
32717+
32718 int devmem_is_allowed(unsigned long pagenr)
32719 {
32720- if (pagenr < 256)
32721+#ifdef CONFIG_GRKERNSEC_KMEM
32722+ /* allow BDA */
32723+ if (!pagenr)
32724 return 1;
32725+ /* allow EBDA */
32726+ if (pagenr >= ebda_start && pagenr < ebda_end)
32727+ return 1;
32728+ /* if tboot is in use, allow access to its hardcoded serial log range */
32729+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32730+ return 1;
32731+#else
32732+ if (!pagenr)
32733+ return 1;
32734+#ifdef CONFIG_VM86
32735+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32736+ return 1;
32737+#endif
32738+#endif
32739+
32740+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32741+ return 1;
32742+#ifdef CONFIG_GRKERNSEC_KMEM
32743+ /* throw out everything else below 1MB */
32744+ if (pagenr <= 256)
32745+ return 0;
32746+#endif
32747 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32748 return 0;
32749 if (!page_is_ram(pagenr))
32750@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32751 #endif
32752 }
32753
32754+#ifdef CONFIG_GRKERNSEC_KMEM
32755+static inline void gr_init_ebda(void)
32756+{
32757+ unsigned int ebda_addr;
32758+ unsigned int ebda_size = 0;
32759+
32760+ ebda_addr = get_bios_ebda();
32761+ if (ebda_addr) {
32762+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32763+ ebda_size <<= 10;
32764+ }
32765+ if (ebda_addr && ebda_size) {
32766+ ebda_start = ebda_addr >> PAGE_SHIFT;
32767+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32768+ } else {
32769+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32770+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32771+ }
32772+}
32773+#else
32774+static inline void gr_init_ebda(void) { }
32775+#endif
32776+
32777 void free_initmem(void)
32778 {
32779+#ifdef CONFIG_PAX_KERNEXEC
32780+#ifdef CONFIG_X86_32
32781+ /* PaX: limit KERNEL_CS to actual size */
32782+ unsigned long addr, limit;
32783+ struct desc_struct d;
32784+ int cpu;
32785+#else
32786+ pgd_t *pgd;
32787+ pud_t *pud;
32788+ pmd_t *pmd;
32789+ unsigned long addr, end;
32790+#endif
32791+#endif
32792+
32793+ gr_init_ebda();
32794+
32795+#ifdef CONFIG_PAX_KERNEXEC
32796+#ifdef CONFIG_X86_32
32797+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32798+ limit = (limit - 1UL) >> PAGE_SHIFT;
32799+
32800+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32801+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32802+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32803+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32804+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32805+ }
32806+
32807+ /* PaX: make KERNEL_CS read-only */
32808+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32809+ if (!paravirt_enabled())
32810+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32811+/*
32812+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32813+ pgd = pgd_offset_k(addr);
32814+ pud = pud_offset(pgd, addr);
32815+ pmd = pmd_offset(pud, addr);
32816+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32817+ }
32818+*/
32819+#ifdef CONFIG_X86_PAE
32820+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32821+/*
32822+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32823+ pgd = pgd_offset_k(addr);
32824+ pud = pud_offset(pgd, addr);
32825+ pmd = pmd_offset(pud, addr);
32826+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32827+ }
32828+*/
32829+#endif
32830+
32831+#ifdef CONFIG_MODULES
32832+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32833+#endif
32834+
32835+#else
32836+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32837+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32838+ pgd = pgd_offset_k(addr);
32839+ pud = pud_offset(pgd, addr);
32840+ pmd = pmd_offset(pud, addr);
32841+ if (!pmd_present(*pmd))
32842+ continue;
32843+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32844+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32845+ else
32846+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32847+ }
32848+
32849+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32850+ end = addr + KERNEL_IMAGE_SIZE;
32851+ for (; addr < end; addr += PMD_SIZE) {
32852+ pgd = pgd_offset_k(addr);
32853+ pud = pud_offset(pgd, addr);
32854+ pmd = pmd_offset(pud, addr);
32855+ if (!pmd_present(*pmd))
32856+ continue;
32857+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32858+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32859+ }
32860+#endif
32861+
32862+ flush_tlb_all();
32863+#endif
32864+
32865 free_init_pages("unused kernel",
32866 (unsigned long)(&__init_begin),
32867 (unsigned long)(&__init_end));
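
Taken together, the GRKERNSEC_KMEM branch of devmem_is_allowed() is a small allow-list for /dev/mem below 1MB: the BIOS Data Area page, the detected (or assumed) EBDA window, the legacy ISA/VGA hole, and a tboot serial-log window when tboot is active stay readable; everything else at or below 1MB is refused, and higher pages fall through to the existing RAM/exclusivity tests. A stand-alone model of that decision (the fall-through is collapsed to "allowed" here for brevity; constants mirror the patch):

#include <stdbool.h>

#define PAGE_SHIFT	12
#define ISA_START_PFN	(0xa0000UL >> PAGE_SHIFT)
#define ISA_END_PFN	(0x100000UL >> PAGE_SHIFT)

/* fallback EBDA window used by gr_init_ebda() when the BDA pointer is bad */
static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;
static unsigned long ebda_end   = 0xa0000UL >> PAGE_SHIFT;

static bool devmem_allowed_model(unsigned long pagenr)
{
	if (!pagenr)
		return true;				/* BDA */
	if (pagenr >= ebda_start && pagenr < ebda_end)
		return true;				/* EBDA */
	if (pagenr >= ISA_START_PFN && pagenr < ISA_END_PFN)
		return true;				/* ISA/VGA hole */
	return pagenr > 256;	/* pages <= 256 rejected, higher ones fall through */
}
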
32868diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32869index e395048..cd38278 100644
32870--- a/arch/x86/mm/init_32.c
32871+++ b/arch/x86/mm/init_32.c
32872@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32873 bool __read_mostly __vmalloc_start_set = false;
32874
32875 /*
32876- * Creates a middle page table and puts a pointer to it in the
32877- * given global directory entry. This only returns the gd entry
32878- * in non-PAE compilation mode, since the middle layer is folded.
32879- */
32880-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32881-{
32882- pud_t *pud;
32883- pmd_t *pmd_table;
32884-
32885-#ifdef CONFIG_X86_PAE
32886- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32887- pmd_table = (pmd_t *)alloc_low_page();
32888- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32889- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32890- pud = pud_offset(pgd, 0);
32891- BUG_ON(pmd_table != pmd_offset(pud, 0));
32892-
32893- return pmd_table;
32894- }
32895-#endif
32896- pud = pud_offset(pgd, 0);
32897- pmd_table = pmd_offset(pud, 0);
32898-
32899- return pmd_table;
32900-}
32901-
32902-/*
32903 * Create a page table and place a pointer to it in a middle page
32904 * directory entry:
32905 */
32906@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32907 pte_t *page_table = (pte_t *)alloc_low_page();
32908
32909 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32910+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32911+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32912+#else
32913 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32914+#endif
32915 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32916 }
32917
32918 return pte_offset_kernel(pmd, 0);
32919 }
32920
32921+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32922+{
32923+ pud_t *pud;
32924+ pmd_t *pmd_table;
32925+
32926+ pud = pud_offset(pgd, 0);
32927+ pmd_table = pmd_offset(pud, 0);
32928+
32929+ return pmd_table;
32930+}
32931+
32932 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32933 {
32934 int pgd_idx = pgd_index(vaddr);
32935@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32936 int pgd_idx, pmd_idx;
32937 unsigned long vaddr;
32938 pgd_t *pgd;
32939+ pud_t *pud;
32940 pmd_t *pmd;
32941 pte_t *pte = NULL;
32942 unsigned long count = page_table_range_init_count(start, end);
32943@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32944 pgd = pgd_base + pgd_idx;
32945
32946 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32947- pmd = one_md_table_init(pgd);
32948- pmd = pmd + pmd_index(vaddr);
32949+ pud = pud_offset(pgd, vaddr);
32950+ pmd = pmd_offset(pud, vaddr);
32951+
32952+#ifdef CONFIG_X86_PAE
32953+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32954+#endif
32955+
32956 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32957 pmd++, pmd_idx++) {
32958 pte = page_table_kmap_check(one_page_table_init(pmd),
32959@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32960 }
32961 }
32962
32963-static inline int is_kernel_text(unsigned long addr)
32964+static inline int is_kernel_text(unsigned long start, unsigned long end)
32965 {
32966- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32967- return 1;
32968- return 0;
32969+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32970+ end <= ktla_ktva((unsigned long)_stext)) &&
32971+ (start >= ktla_ktva((unsigned long)_einittext) ||
32972+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32973+
32974+#ifdef CONFIG_ACPI_SLEEP
32975+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32976+#endif
32977+
32978+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32979+ return 0;
32980+ return 1;
32981 }
32982
32983 /*
32984@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32985 unsigned long last_map_addr = end;
32986 unsigned long start_pfn, end_pfn;
32987 pgd_t *pgd_base = swapper_pg_dir;
32988- int pgd_idx, pmd_idx, pte_ofs;
32989+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32990 unsigned long pfn;
32991 pgd_t *pgd;
32992+ pud_t *pud;
32993 pmd_t *pmd;
32994 pte_t *pte;
32995 unsigned pages_2m, pages_4k;
32996@@ -291,8 +295,13 @@ repeat:
32997 pfn = start_pfn;
32998 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32999 pgd = pgd_base + pgd_idx;
33000- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33001- pmd = one_md_table_init(pgd);
33002+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33003+ pud = pud_offset(pgd, 0);
33004+ pmd = pmd_offset(pud, 0);
33005+
33006+#ifdef CONFIG_X86_PAE
33007+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33008+#endif
33009
33010 if (pfn >= end_pfn)
33011 continue;
33012@@ -304,14 +313,13 @@ repeat:
33013 #endif
33014 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33015 pmd++, pmd_idx++) {
33016- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33017+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33018
33019 /*
33020 * Map with big pages if possible, otherwise
33021 * create normal page tables:
33022 */
33023 if (use_pse) {
33024- unsigned int addr2;
33025 pgprot_t prot = PAGE_KERNEL_LARGE;
33026 /*
33027 * first pass will use the same initial
33028@@ -322,11 +330,7 @@ repeat:
33029 _PAGE_PSE);
33030
33031 pfn &= PMD_MASK >> PAGE_SHIFT;
33032- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33033- PAGE_OFFSET + PAGE_SIZE-1;
33034-
33035- if (is_kernel_text(addr) ||
33036- is_kernel_text(addr2))
33037+ if (is_kernel_text(address, address + PMD_SIZE))
33038 prot = PAGE_KERNEL_LARGE_EXEC;
33039
33040 pages_2m++;
33041@@ -343,7 +347,7 @@ repeat:
33042 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33043 pte += pte_ofs;
33044 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33045- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33046+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33047 pgprot_t prot = PAGE_KERNEL;
33048 /*
33049 * first pass will use the same initial
33050@@ -351,7 +355,7 @@ repeat:
33051 */
33052 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33053
33054- if (is_kernel_text(addr))
33055+ if (is_kernel_text(address, address + PAGE_SIZE))
33056 prot = PAGE_KERNEL_EXEC;
33057
33058 pages_4k++;
33059@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33060
33061 pud = pud_offset(pgd, va);
33062 pmd = pmd_offset(pud, va);
33063- if (!pmd_present(*pmd))
33064+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33065 break;
33066
33067 /* should not be large page here */
33068@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33069
33070 static void __init pagetable_init(void)
33071 {
33072- pgd_t *pgd_base = swapper_pg_dir;
33073-
33074- permanent_kmaps_init(pgd_base);
33075+ permanent_kmaps_init(swapper_pg_dir);
33076 }
33077
33078-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33079+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33080 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33081
33082 /* user-defined highmem size */
33083@@ -787,10 +789,10 @@ void __init mem_init(void)
33084 ((unsigned long)&__init_end -
33085 (unsigned long)&__init_begin) >> 10,
33086
33087- (unsigned long)&_etext, (unsigned long)&_edata,
33088- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33089+ (unsigned long)&_sdata, (unsigned long)&_edata,
33090+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33091
33092- (unsigned long)&_text, (unsigned long)&_etext,
33093+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33094 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33095
33096 /*
33097@@ -883,6 +885,7 @@ void set_kernel_text_rw(void)
33098 if (!kernel_set_to_readonly)
33099 return;
33100
33101+ start = ktla_ktva(start);
33102 pr_debug("Set kernel text: %lx - %lx for read write\n",
33103 start, start+size);
33104
33105@@ -897,6 +900,7 @@ void set_kernel_text_ro(void)
33106 if (!kernel_set_to_readonly)
33107 return;
33108
33109+ start = ktla_ktva(start);
33110 pr_debug("Set kernel text: %lx - %lx for read only\n",
33111 start, start+size);
33112
33113@@ -925,6 +929,7 @@ void mark_rodata_ro(void)
33114 unsigned long start = PFN_ALIGN(_text);
33115 unsigned long size = PFN_ALIGN(_etext) - start;
33116
33117+ start = ktla_ktva(start);
33118 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33119 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33120 size >> 10);
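
The rewritten is_kernel_text() switches from testing a single address to testing a half-open range [start, end) for overlap with each protected region (.text, .init.text, the ACPI wakeup trampoline, the BIOS window). Two half-open ranges miss each other exactly when one starts at or past the other's end, which is the shape of every clause above:

#include <stdbool.h>

/* [start,end) overlaps [lo,hi) iff neither lies entirely past the other */
static bool ranges_overlap(unsigned long start, unsigned long end,
			   unsigned long lo, unsigned long hi)
{
	return !(start >= hi || end <= lo);
}

The function ANDs the per-region "misses" tests together and returns the negation, so a 2MB or 4KB mapping candidate counts as kernel text if it touches any protected region at all.
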
33121diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33122index f35c66c..84b95ef 100644
33123--- a/arch/x86/mm/init_64.c
33124+++ b/arch/x86/mm/init_64.c
33125@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33126 * around without checking the pgd every time.
33127 */
33128
33129-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33130+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33131 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33132
33133 int force_personality32;
33134@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33135
33136 for (address = start; address <= end; address += PGDIR_SIZE) {
33137 const pgd_t *pgd_ref = pgd_offset_k(address);
33138+
33139+#ifdef CONFIG_PAX_PER_CPU_PGD
33140+ unsigned long cpu;
33141+#else
33142 struct page *page;
33143+#endif
33144
33145 if (pgd_none(*pgd_ref))
33146 continue;
33147
33148 spin_lock(&pgd_lock);
33149+
33150+#ifdef CONFIG_PAX_PER_CPU_PGD
33151+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33152+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33153+
33154+ if (pgd_none(*pgd))
33155+ set_pgd(pgd, *pgd_ref);
33156+ else
33157+ BUG_ON(pgd_page_vaddr(*pgd)
33158+ != pgd_page_vaddr(*pgd_ref));
33159+ pgd = pgd_offset_cpu(cpu, kernel, address);
33160+#else
33161 list_for_each_entry(page, &pgd_list, lru) {
33162 pgd_t *pgd;
33163 spinlock_t *pgt_lock;
33164@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33165 /* the pgt_lock only for Xen */
33166 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33167 spin_lock(pgt_lock);
33168+#endif
33169
33170 if (pgd_none(*pgd))
33171 set_pgd(pgd, *pgd_ref);
33172@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33173 BUG_ON(pgd_page_vaddr(*pgd)
33174 != pgd_page_vaddr(*pgd_ref));
33175
33176+#ifndef CONFIG_PAX_PER_CPU_PGD
33177 spin_unlock(pgt_lock);
33178+#endif
33179+
33180 }
33181 spin_unlock(&pgd_lock);
33182 }
33183@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33184 {
33185 if (pgd_none(*pgd)) {
33186 pud_t *pud = (pud_t *)spp_getpage();
33187- pgd_populate(&init_mm, pgd, pud);
33188+ pgd_populate_kernel(&init_mm, pgd, pud);
33189 if (pud != pud_offset(pgd, 0))
33190 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33191 pud, pud_offset(pgd, 0));
33192@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33193 {
33194 if (pud_none(*pud)) {
33195 pmd_t *pmd = (pmd_t *) spp_getpage();
33196- pud_populate(&init_mm, pud, pmd);
33197+ pud_populate_kernel(&init_mm, pud, pmd);
33198 if (pmd != pmd_offset(pud, 0))
33199 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33200 pmd, pmd_offset(pud, 0));
33201@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33202 pmd = fill_pmd(pud, vaddr);
33203 pte = fill_pte(pmd, vaddr);
33204
33205+ pax_open_kernel();
33206 set_pte(pte, new_pte);
33207+ pax_close_kernel();
33208
33209 /*
33210 * It's enough to flush this one mapping.
33211@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33212 pgd = pgd_offset_k((unsigned long)__va(phys));
33213 if (pgd_none(*pgd)) {
33214 pud = (pud_t *) spp_getpage();
33215- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33216- _PAGE_USER));
33217+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33218 }
33219 pud = pud_offset(pgd, (unsigned long)__va(phys));
33220 if (pud_none(*pud)) {
33221 pmd = (pmd_t *) spp_getpage();
33222- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33223- _PAGE_USER));
33224+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33225 }
33226 pmd = pmd_offset(pud, phys);
33227 BUG_ON(!pmd_none(*pmd));
33228@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33229 prot);
33230
33231 spin_lock(&init_mm.page_table_lock);
33232- pud_populate(&init_mm, pud, pmd);
33233+ pud_populate_kernel(&init_mm, pud, pmd);
33234 spin_unlock(&init_mm.page_table_lock);
33235 }
33236 __flush_tlb_all();
33237@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33238 page_size_mask);
33239
33240 spin_lock(&init_mm.page_table_lock);
33241- pgd_populate(&init_mm, pgd, pud);
33242+ pgd_populate_kernel(&init_mm, pgd, pud);
33243 spin_unlock(&init_mm.page_table_lock);
33244 pgd_changed = true;
33245 }
33246@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
33247 static struct vm_area_struct gate_vma = {
33248 .vm_start = VSYSCALL_START,
33249 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
33250- .vm_page_prot = PAGE_READONLY_EXEC,
33251- .vm_flags = VM_READ | VM_EXEC
33252+ .vm_page_prot = PAGE_READONLY,
33253+ .vm_flags = VM_READ
33254 };
33255
33256 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
33257@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
33258
33259 const char *arch_vma_name(struct vm_area_struct *vma)
33260 {
33261- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
33262+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
33263 return "[vdso]";
33264 if (vma == &gate_vma)
33265 return "[vsyscall]";
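
Under PAX_PER_CPU_PGD every CPU owns a private kernel/user page-directory pair, so the sync_global_pgds() hunk above must fan a new top-level kernel entry out to every per-CPU copy instead of walking the shared pgd_list. A toy model of that propagation step (array sizes and names are illustrative):

#include <assert.h>
#include <stdint.h>

#define NCPUS	4
#define PTRS	512

static uintptr_t ref_pgd[PTRS];		/* init_mm's directory */
static uintptr_t cpu_pgd[NCPUS][PTRS];	/* one private copy per CPU */

static void sync_entry(unsigned int idx)
{
	for (unsigned int cpu = 0; cpu < NCPUS; cpu++) {
		if (!cpu_pgd[cpu][idx])
			cpu_pgd[cpu][idx] = ref_pgd[idx];	/* propagate */
		else	/* the BUG_ON in the patch: copies must agree */
			assert(cpu_pgd[cpu][idx] == ref_pgd[idx]);
	}
}
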
33266diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33267index 7b179b4..6bd17777 100644
33268--- a/arch/x86/mm/iomap_32.c
33269+++ b/arch/x86/mm/iomap_32.c
33270@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33271 type = kmap_atomic_idx_push();
33272 idx = type + KM_TYPE_NR * smp_processor_id();
33273 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33274+
33275+ pax_open_kernel();
33276 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33277+ pax_close_kernel();
33278+
33279 arch_flush_lazy_mmu_mode();
33280
33281 return (void *)vaddr;
33282diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33283index 799580c..72f9fe0 100644
33284--- a/arch/x86/mm/ioremap.c
33285+++ b/arch/x86/mm/ioremap.c
33286@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
33287 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
33288 int is_ram = page_is_ram(pfn);
33289
33290- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
33291+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
33292 return NULL;
33293 WARN_ON_ONCE(is_ram);
33294 }
33295@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
33296 *
33297 * Caller must ensure there is only one unmapping for the same pointer.
33298 */
33299-void iounmap(volatile void __iomem *addr)
33300+void iounmap(const volatile void __iomem *addr)
33301 {
33302 struct vm_struct *p, *o;
33303
33304@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33305
33306 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33307 if (page_is_ram(start >> PAGE_SHIFT))
33308+#ifdef CONFIG_HIGHMEM
33309+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33310+#endif
33311 return __va(phys);
33312
33313 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33314@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33315 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33316 {
33317 if (page_is_ram(phys >> PAGE_SHIFT))
33318+#ifdef CONFIG_HIGHMEM
33319+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33320+#endif
33321 return;
33322
33323 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33324@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
33325 early_param("early_ioremap_debug", early_ioremap_debug_setup);
33326
33327 static __initdata int after_paging_init;
33328-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33329+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33330
33331 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33332 {
33333@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33334 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
33335
33336 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33337- memset(bm_pte, 0, sizeof(bm_pte));
33338- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33339+ pmd_populate_user(&init_mm, pmd, bm_pte);
33340
33341 /*
33342 * The boot-ioremap range spans multiple pmds, for which
33343diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33344index d87dd6d..bf3fa66 100644
33345--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33346+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33347@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33348 * memory (e.g. tracked pages)? For now, we need this to avoid
33349 * invoking kmemcheck for PnP BIOS calls.
33350 */
33351- if (regs->flags & X86_VM_MASK)
33352+ if (v8086_mode(regs))
33353 return false;
33354- if (regs->cs != __KERNEL_CS)
33355+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33356 return false;
33357
33358 pte = kmemcheck_pte_lookup(address);
33359diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33360index 25e7e13..1964579 100644
33361--- a/arch/x86/mm/mmap.c
33362+++ b/arch/x86/mm/mmap.c
33363@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
33364 * Leave an at least ~128 MB hole with possible stack randomization.
33365 */
33366 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33367-#define MAX_GAP (TASK_SIZE/6*5)
33368+#define MAX_GAP (pax_task_size/6*5)
33369
33370 static int mmap_is_legacy(void)
33371 {
33372@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33373 return rnd << PAGE_SHIFT;
33374 }
33375
33376-static unsigned long mmap_base(void)
33377+static unsigned long mmap_base(struct mm_struct *mm)
33378 {
33379 unsigned long gap = rlimit(RLIMIT_STACK);
33380+ unsigned long pax_task_size = TASK_SIZE;
33381+
33382+#ifdef CONFIG_PAX_SEGMEXEC
33383+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33384+ pax_task_size = SEGMEXEC_TASK_SIZE;
33385+#endif
33386
33387 if (gap < MIN_GAP)
33388 gap = MIN_GAP;
33389 else if (gap > MAX_GAP)
33390 gap = MAX_GAP;
33391
33392- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33393+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33394 }
33395
33396 /*
33397 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33398 * does, but not when emulating X86_32
33399 */
33400-static unsigned long mmap_legacy_base(void)
33401+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33402 {
33403- if (mmap_is_ia32())
33404+ if (mmap_is_ia32()) {
33405+
33406+#ifdef CONFIG_PAX_SEGMEXEC
33407+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33408+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33409+ else
33410+#endif
33411+
33412 return TASK_UNMAPPED_BASE;
33413- else
33414+ } else
33415 return TASK_UNMAPPED_BASE + mmap_rnd();
33416 }
33417
33418@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33419 */
33420 void arch_pick_mmap_layout(struct mm_struct *mm)
33421 {
33422- mm->mmap_legacy_base = mmap_legacy_base();
33423- mm->mmap_base = mmap_base();
33424+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33425+ mm->mmap_base = mmap_base(mm);
33426+
33427+#ifdef CONFIG_PAX_RANDMMAP
33428+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33429+ mm->mmap_legacy_base += mm->delta_mmap;
33430+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33431+ }
33432+#endif
33433
33434 if (mmap_is_legacy()) {
33435 mm->mmap_base = mm->mmap_legacy_base;
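
The patched mmap_base() keeps the stock arithmetic but measures from the possibly SEGMEXEC-halved task size: clamp the stack rlimit between MIN_GAP and MAX_GAP, then drop the base that far below the top, minus the mmap randomization. As a stand-alone calculation (constants mirror the code above, the helper names are illustrative):

#define PAGE_ALIGN(x)	(((x) + 0xfffUL) & ~0xfffUL)	/* round up, 4K pages */

static unsigned long mmap_base_model(unsigned long task_size,
				     unsigned long stack_rlimit,
				     unsigned long stack_max_rnd,
				     unsigned long mmap_rnd)
{
	unsigned long min_gap = 128UL * 1024 * 1024 + stack_max_rnd;
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;
	return PAGE_ALIGN(task_size - gap - mmap_rnd);
}

The RANDMMAP delta_mmap/delta_stack adjustments are applied separately, in the arch_pick_mmap_layout() hunk above.
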
33436diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33437index 0057a7a..95c7edd 100644
33438--- a/arch/x86/mm/mmio-mod.c
33439+++ b/arch/x86/mm/mmio-mod.c
33440@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33441 break;
33442 default:
33443 {
33444- unsigned char *ip = (unsigned char *)instptr;
33445+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33446 my_trace->opcode = MMIO_UNKNOWN_OP;
33447 my_trace->width = 0;
33448 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33449@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33450 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33451 void __iomem *addr)
33452 {
33453- static atomic_t next_id;
33454+ static atomic_unchecked_t next_id;
33455 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33456 /* These are page-unaligned. */
33457 struct mmiotrace_map map = {
33458@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33459 .private = trace
33460 },
33461 .phys = offset,
33462- .id = atomic_inc_return(&next_id)
33463+ .id = atomic_inc_return_unchecked(&next_id)
33464 };
33465 map.map_id = trace->id;
33466
33467@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33468 ioremap_trace_core(offset, size, addr);
33469 }
33470
33471-static void iounmap_trace_core(volatile void __iomem *addr)
33472+static void iounmap_trace_core(const volatile void __iomem *addr)
33473 {
33474 struct mmiotrace_map map = {
33475 .phys = 0,
33476@@ -328,7 +328,7 @@ not_enabled:
33477 }
33478 }
33479
33480-void mmiotrace_iounmap(volatile void __iomem *addr)
33481+void mmiotrace_iounmap(const volatile void __iomem *addr)
33482 {
33483 might_sleep();
33484 if (is_enabled()) /* recheck and proper locking in *_core() */
33485diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33486index 27aa0455..0eb1406 100644
33487--- a/arch/x86/mm/numa.c
33488+++ b/arch/x86/mm/numa.c
33489@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
33490 return true;
33491 }
33492
33493-static int __init numa_register_memblks(struct numa_meminfo *mi)
33494+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33495 {
33496 unsigned long uninitialized_var(pfn_align);
33497 int i, nid;
33498diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
33499index 461bc82..4e091a3 100644
33500--- a/arch/x86/mm/pageattr-test.c
33501+++ b/arch/x86/mm/pageattr-test.c
33502@@ -35,7 +35,7 @@ enum {
33503
33504 static int pte_testbit(pte_t pte)
33505 {
33506- return pte_flags(pte) & _PAGE_UNUSED1;
33507+ return pte_flags(pte) & _PAGE_CPA_TEST;
33508 }
33509
33510 struct split_state {
33511diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33512index a348868..3c64310 100644
33513--- a/arch/x86/mm/pageattr.c
33514+++ b/arch/x86/mm/pageattr.c
33515@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33516 */
33517 #ifdef CONFIG_PCI_BIOS
33518 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33519- pgprot_val(forbidden) |= _PAGE_NX;
33520+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33521 #endif
33522
33523 /*
33524@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33525 * Does not cover __inittext since that is gone later on. On
33526 * 64bit we do not enforce !NX on the low mapping
33527 */
33528- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33529- pgprot_val(forbidden) |= _PAGE_NX;
33530+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33531+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33532
33533+#ifdef CONFIG_DEBUG_RODATA
33534 /*
33535 * The .rodata section needs to be read-only. Using the pfn
33536 * catches all aliases.
33537@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33538 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33539 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33540 pgprot_val(forbidden) |= _PAGE_RW;
33541+#endif
33542
33543 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33544 /*
33545@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33546 }
33547 #endif
33548
33549+#ifdef CONFIG_PAX_KERNEXEC
33550+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33551+ pgprot_val(forbidden) |= _PAGE_RW;
33552+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33553+ }
33554+#endif
33555+
33556 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33557
33558 return prot;
33559@@ -416,23 +425,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33560 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33561 {
33562 /* change init_mm */
33563+ pax_open_kernel();
33564 set_pte_atomic(kpte, pte);
33565+
33566 #ifdef CONFIG_X86_32
33567 if (!SHARED_KERNEL_PMD) {
33568+
33569+#ifdef CONFIG_PAX_PER_CPU_PGD
33570+ unsigned long cpu;
33571+#else
33572 struct page *page;
33573+#endif
33574
33575+#ifdef CONFIG_PAX_PER_CPU_PGD
33576+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33577+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33578+#else
33579 list_for_each_entry(page, &pgd_list, lru) {
33580- pgd_t *pgd;
33581+ pgd_t *pgd = (pgd_t *)page_address(page);
33582+#endif
33583+
33584 pud_t *pud;
33585 pmd_t *pmd;
33586
33587- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33588+ pgd += pgd_index(address);
33589 pud = pud_offset(pgd, address);
33590 pmd = pmd_offset(pud, address);
33591 set_pte_atomic((pte_t *)pmd, pte);
33592 }
33593 }
33594 #endif
33595+ pax_close_kernel();
33596 }
33597
33598 static int
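
static_protections() works by accumulation: each policy ORs the page-table bits its region must never carry into "forbidden", and the final protection is the caller's request with those bits stripped. The KERNEXEC hunk above adds _PAGE_RW and _PAGE_NX for the [_text, _sdata) physical range, pinning kernel text and rodata read-only and executable regardless of what the caller asked for. The pattern in isolation (flag values are illustrative):

#define P_RW	0x1
#define P_NX	0x2

static unsigned int prot_model(unsigned int prot, int in_kernexec_text)
{
	unsigned int forbidden = 0;

	if (in_kernexec_text)
		forbidden |= P_RW | P_NX;	/* stay read-only, stay executable */
	/* ... further region policies OR in more bits ... */
	return prot & ~forbidden;
}
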
33599diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33600index 6574388..87e9bef 100644
33601--- a/arch/x86/mm/pat.c
33602+++ b/arch/x86/mm/pat.c
33603@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33604
33605 if (!entry) {
33606 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33607- current->comm, current->pid, start, end - 1);
33608+ current->comm, task_pid_nr(current), start, end - 1);
33609 return -EINVAL;
33610 }
33611
33612@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33613
33614 while (cursor < to) {
33615 if (!devmem_is_allowed(pfn)) {
33616- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33617- current->comm, from, to - 1);
33618+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33619+ current->comm, from, to - 1, cursor);
33620 return 0;
33621 }
33622 cursor += PAGE_SIZE;
33623@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33624 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33625 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33626 "for [mem %#010Lx-%#010Lx]\n",
33627- current->comm, current->pid,
33628+ current->comm, task_pid_nr(current),
33629 cattr_name(flags),
33630 base, (unsigned long long)(base + size-1));
33631 return -EINVAL;
33632@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33633 flags = lookup_memtype(paddr);
33634 if (want_flags != flags) {
33635 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33636- current->comm, current->pid,
33637+ current->comm, task_pid_nr(current),
33638 cattr_name(want_flags),
33639 (unsigned long long)paddr,
33640 (unsigned long long)(paddr + size - 1),
33641@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33642 free_memtype(paddr, paddr + size);
33643 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33644 " for [mem %#010Lx-%#010Lx], got %s\n",
33645- current->comm, current->pid,
33646+ current->comm, task_pid_nr(current),
33647 cattr_name(want_flags),
33648 (unsigned long long)paddr,
33649 (unsigned long long)(paddr + size - 1),
33650diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33651index 415f6c4..d319983 100644
33652--- a/arch/x86/mm/pat_rbtree.c
33653+++ b/arch/x86/mm/pat_rbtree.c
33654@@ -160,7 +160,7 @@ success:
33655
33656 failure:
33657 printk(KERN_INFO "%s:%d conflicting memory types "
33658- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33659+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33660 end, cattr_name(found_type), cattr_name(match->type));
33661 return -EBUSY;
33662 }
33663diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33664index 9f0614d..92ae64a 100644
33665--- a/arch/x86/mm/pf_in.c
33666+++ b/arch/x86/mm/pf_in.c
33667@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33668 int i;
33669 enum reason_type rv = OTHERS;
33670
33671- p = (unsigned char *)ins_addr;
33672+ p = (unsigned char *)ktla_ktva(ins_addr);
33673 p += skip_prefix(p, &prf);
33674 p += get_opcode(p, &opcode);
33675
33676@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33677 struct prefix_bits prf;
33678 int i;
33679
33680- p = (unsigned char *)ins_addr;
33681+ p = (unsigned char *)ktla_ktva(ins_addr);
33682 p += skip_prefix(p, &prf);
33683 p += get_opcode(p, &opcode);
33684
33685@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33686 struct prefix_bits prf;
33687 int i;
33688
33689- p = (unsigned char *)ins_addr;
33690+ p = (unsigned char *)ktla_ktva(ins_addr);
33691 p += skip_prefix(p, &prf);
33692 p += get_opcode(p, &opcode);
33693
33694@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33695 struct prefix_bits prf;
33696 int i;
33697
33698- p = (unsigned char *)ins_addr;
33699+ p = (unsigned char *)ktla_ktva(ins_addr);
33700 p += skip_prefix(p, &prf);
33701 p += get_opcode(p, &opcode);
33702 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33703@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33704 struct prefix_bits prf;
33705 int i;
33706
33707- p = (unsigned char *)ins_addr;
33708+ p = (unsigned char *)ktla_ktva(ins_addr);
33709 p += skip_prefix(p, &prf);
33710 p += get_opcode(p, &opcode);
33711 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33712diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33713index c96314a..433b127 100644
33714--- a/arch/x86/mm/pgtable.c
33715+++ b/arch/x86/mm/pgtable.c
33716@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33717 list_del(&page->lru);
33718 }
33719
33720-#define UNSHARED_PTRS_PER_PGD \
33721- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33722+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33723+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33724
33725+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33726+{
33727+ unsigned int count = USER_PGD_PTRS;
33728
33729+ if (!pax_user_shadow_base)
33730+ return;
33731+
33732+ while (count--)
33733+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33734+}
33735+#endif
33736+
33737+#ifdef CONFIG_PAX_PER_CPU_PGD
33738+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33739+{
33740+ unsigned int count = USER_PGD_PTRS;
33741+
33742+ while (count--) {
33743+ pgd_t pgd;
33744+
33745+#ifdef CONFIG_X86_64
33746+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33747+#else
33748+ pgd = *src++;
33749+#endif
33750+
33751+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33752+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33753+#endif
33754+
33755+ *dst++ = pgd;
33756+ }
33757+
33758+}
33759+#endif
33760+
33761+#ifdef CONFIG_X86_64
33762+#define pxd_t pud_t
33763+#define pyd_t pgd_t
33764+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33765+#define pgtable_pxd_page_ctor(page) true
33766+#define pgtable_pxd_page_dtor(page)
33767+#define pxd_free(mm, pud) pud_free((mm), (pud))
33768+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33769+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33770+#define PYD_SIZE PGDIR_SIZE
33771+#else
33772+#define pxd_t pmd_t
33773+#define pyd_t pud_t
33774+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33775+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33776+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33777+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33778+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33779+#define pyd_offset(mm, address) pud_offset((mm), (address))
33780+#define PYD_SIZE PUD_SIZE
33781+#endif
33782+
33783+#ifdef CONFIG_PAX_PER_CPU_PGD
33784+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33785+static inline void pgd_dtor(pgd_t *pgd) {}
33786+#else
33787 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33788 {
33789 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33790@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33791 pgd_list_del(pgd);
33792 spin_unlock(&pgd_lock);
33793 }
33794+#endif
33795
33796 /*
33797 * List of all pgd's needed for non-PAE so it can invalidate entries
33798@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33799 * -- nyc
33800 */
33801
33802-#ifdef CONFIG_X86_PAE
33803+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33804 /*
33805 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33806 * updating the top-level pagetable entries to guarantee the
33807@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33808 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33809 * and initialize the kernel pmds here.
33810 */
33811-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33812+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33813
33814 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33815 {
33816@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33817 */
33818 flush_tlb_mm(mm);
33819 }
33820+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33821+#define PREALLOCATED_PXDS USER_PGD_PTRS
33822 #else /* !CONFIG_X86_PAE */
33823
33824 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33825-#define PREALLOCATED_PMDS 0
33826+#define PREALLOCATED_PXDS 0
33827
33828 #endif /* CONFIG_X86_PAE */
33829
33830-static void free_pmds(pmd_t *pmds[])
33831+static void free_pxds(pxd_t *pxds[])
33832 {
33833 int i;
33834
33835- for(i = 0; i < PREALLOCATED_PMDS; i++)
33836- if (pmds[i]) {
33837- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33838- free_page((unsigned long)pmds[i]);
33839+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33840+ if (pxds[i]) {
33841+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33842+ free_page((unsigned long)pxds[i]);
33843 }
33844 }
33845
33846-static int preallocate_pmds(pmd_t *pmds[])
33847+static int preallocate_pxds(pxd_t *pxds[])
33848 {
33849 int i;
33850 bool failed = false;
33851
33852- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33853- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33854- if (!pmd)
33855+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33856+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33857+ if (!pxd)
33858 failed = true;
33859- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33860- free_page((unsigned long)pmd);
33861- pmd = NULL;
33862+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33863+ free_page((unsigned long)pxd);
33864+ pxd = NULL;
33865 failed = true;
33866 }
33867- pmds[i] = pmd;
33868+ pxds[i] = pxd;
33869 }
33870
33871 if (failed) {
33872- free_pmds(pmds);
33873+ free_pxds(pxds);
33874 return -ENOMEM;
33875 }
33876
33877@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33878 * preallocate which never got a corresponding vma will need to be
33879 * freed manually.
33880 */
33881-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33882+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33883 {
33884 int i;
33885
33886- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33887+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33888 pgd_t pgd = pgdp[i];
33889
33890 if (pgd_val(pgd) != 0) {
33891- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33892+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33893
33894- pgdp[i] = native_make_pgd(0);
33895+ set_pgd(pgdp + i, native_make_pgd(0));
33896
33897- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33898- pmd_free(mm, pmd);
33899+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33900+ pxd_free(mm, pxd);
33901 }
33902 }
33903 }
33904
33905-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33906+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33907 {
33908- pud_t *pud;
33909+ pyd_t *pyd;
33910 int i;
33911
33912- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33913+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33914 return;
33915
33916- pud = pud_offset(pgd, 0);
33917-
33918- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33919- pmd_t *pmd = pmds[i];
33920+#ifdef CONFIG_X86_64
33921+ pyd = pyd_offset(mm, 0L);
33922+#else
33923+ pyd = pyd_offset(pgd, 0L);
33924+#endif
33925
33926+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33927+ pxd_t *pxd = pxds[i];
33928 if (i >= KERNEL_PGD_BOUNDARY)
33929- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33930- sizeof(pmd_t) * PTRS_PER_PMD);
33931+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33932+ sizeof(pxd_t) * PTRS_PER_PMD);
33933
33934- pud_populate(mm, pud, pmd);
33935+ pyd_populate(mm, pyd, pxd);
33936 }
33937 }
33938
33939 pgd_t *pgd_alloc(struct mm_struct *mm)
33940 {
33941 pgd_t *pgd;
33942- pmd_t *pmds[PREALLOCATED_PMDS];
33943+ pxd_t *pxds[PREALLOCATED_PXDS];
33944
33945 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33946
33947@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33948
33949 mm->pgd = pgd;
33950
33951- if (preallocate_pmds(pmds) != 0)
33952+ if (preallocate_pxds(pxds) != 0)
33953 goto out_free_pgd;
33954
33955 if (paravirt_pgd_alloc(mm) != 0)
33956- goto out_free_pmds;
33957+ goto out_free_pxds;
33958
33959 /*
33960 * Make sure that pre-populating the pmds is atomic with
33961@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33962 spin_lock(&pgd_lock);
33963
33964 pgd_ctor(mm, pgd);
33965- pgd_prepopulate_pmd(mm, pgd, pmds);
33966+ pgd_prepopulate_pxd(mm, pgd, pxds);
33967
33968 spin_unlock(&pgd_lock);
33969
33970 return pgd;
33971
33972-out_free_pmds:
33973- free_pmds(pmds);
33974+out_free_pxds:
33975+ free_pxds(pxds);
33976 out_free_pgd:
33977 free_page((unsigned long)pgd);
33978 out:
33979@@ -313,7 +380,7 @@ out:
33980
33981 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33982 {
33983- pgd_mop_up_pmds(mm, pgd);
33984+ pgd_mop_up_pxds(mm, pgd);
33985 pgd_dtor(pgd);
33986 paravirt_pgd_free(mm, pgd);
33987 free_page((unsigned long)pgd);
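
preallocate_pxds()/free_pxds() follow an allocate-all-or-roll-back pattern: the loop keeps going after a failed allocation so that a single free pass can release whatever did succeed. The same shape in plain user-space C (malloc stands in for __get_free_page; the page-table ctor path is omitted):

#include <stdlib.h>

#define NPREALLOC 4

static void free_all(void *p[])
{
	for (int i = 0; i < NPREALLOC; i++)
		free(p[i]);		/* free(NULL) is a harmless no-op */
}

static int prealloc_all(void *p[])
{
	int failed = 0;

	for (int i = 0; i < NPREALLOC; i++) {
		p[i] = malloc(4096);
		if (!p[i])
			failed = 1;	/* note it, but finish the loop */
	}
	if (failed) {
		free_all(p);
		return -1;
	}
	return 0;
}

The pxd/pyd macros above let this one implementation preallocate pmds under 32-bit PAE and puds for the per-CPU user directory on x86_64.
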
33988diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33989index a69bcb8..19068ab 100644
33990--- a/arch/x86/mm/pgtable_32.c
33991+++ b/arch/x86/mm/pgtable_32.c
33992@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33993 return;
33994 }
33995 pte = pte_offset_kernel(pmd, vaddr);
33996+
33997+ pax_open_kernel();
33998 if (pte_val(pteval))
33999 set_pte_at(&init_mm, vaddr, pte, pteval);
34000 else
34001 pte_clear(&init_mm, vaddr, pte);
34002+ pax_close_kernel();
34003
34004 /*
34005 * It's enough to flush this one mapping.
34006diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34007index e666cbb..61788c45 100644
34008--- a/arch/x86/mm/physaddr.c
34009+++ b/arch/x86/mm/physaddr.c
34010@@ -10,7 +10,7 @@
34011 #ifdef CONFIG_X86_64
34012
34013 #ifdef CONFIG_DEBUG_VIRTUAL
34014-unsigned long __phys_addr(unsigned long x)
34015+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34016 {
34017 unsigned long y = x - __START_KERNEL_map;
34018
34019@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34020 #else
34021
34022 #ifdef CONFIG_DEBUG_VIRTUAL
34023-unsigned long __phys_addr(unsigned long x)
34024+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34025 {
34026 unsigned long phys_addr = x - PAGE_OFFSET;
34027 /* VMALLOC_* aren't constants */
34028diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34029index 90555bf..f5f1828 100644
34030--- a/arch/x86/mm/setup_nx.c
34031+++ b/arch/x86/mm/setup_nx.c
34032@@ -5,8 +5,10 @@
34033 #include <asm/pgtable.h>
34034 #include <asm/proto.h>
34035
34036+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34037 static int disable_nx;
34038
34039+#ifndef CONFIG_PAX_PAGEEXEC
34040 /*
34041 * noexec = on|off
34042 *
34043@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34044 return 0;
34045 }
34046 early_param("noexec", noexec_setup);
34047+#endif
34048+
34049+#endif
34050
34051 void x86_configure_nx(void)
34052 {
34053+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34054 if (cpu_has_nx && !disable_nx)
34055 __supported_pte_mask |= _PAGE_NX;
34056 else
34057+#endif
34058 __supported_pte_mask &= ~_PAGE_NX;
34059 }
34060
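Annotation: the #if gating above compiles the NX handling out entirely when neither 64-bit nor PAE page tables are available, since the NX bit only exists in those PTE formats. A self-contained sketch of the resulting decision; the _PAGE_NX value is an assumption mirroring the 64-bit/PAE entry layout:

#define _PAGE_NX (1ULL << 63)   /* assumed NX bit position */

static unsigned long long configure_nx(unsigned long long pte_mask,
                                       int cpu_has_nx, int disable_nx)
{
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        if (cpu_has_nx && !disable_nx)
                return pte_mask | _PAGE_NX;
#endif
        return pte_mask & ~_PAGE_NX;
}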
34061diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34062index dd8dda1..9e9b0f6 100644
34063--- a/arch/x86/mm/tlb.c
34064+++ b/arch/x86/mm/tlb.c
34065@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34066 BUG();
34067 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34068 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34069+
34070+#ifndef CONFIG_PAX_PER_CPU_PGD
34071 load_cr3(swapper_pg_dir);
34072+#endif
34073+
34074 }
34075 }
34076 EXPORT_SYMBOL_GPL(leave_mm);
34077diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34078new file mode 100644
34079index 0000000..dace51c
34080--- /dev/null
34081+++ b/arch/x86/mm/uderef_64.c
34082@@ -0,0 +1,37 @@
34083+#include <linux/mm.h>
34084+#include <asm/pgtable.h>
34085+#include <asm/uaccess.h>
34086+
34087+#ifdef CONFIG_PAX_MEMORY_UDEREF
34088+/* PaX: due to the special call convention these functions must
34089+ * - remain leaf functions under all configurations,
34090+ * - never be called directly, only dereferenced from the wrappers.
34091+ */
34092+void __pax_open_userland(void)
34093+{
34094+ unsigned int cpu;
34095+
34096+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34097+ return;
34098+
34099+ cpu = raw_get_cpu();
34100+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34101+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34102+ raw_put_cpu_no_resched();
34103+}
34104+EXPORT_SYMBOL(__pax_open_userland);
34105+
34106+void __pax_close_userland(void)
34107+{
34108+ unsigned int cpu;
34109+
34110+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34111+ return;
34112+
34113+ cpu = raw_get_cpu();
34114+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34115+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34116+ raw_put_cpu_no_resched();
34117+}
34118+EXPORT_SYMBOL(__pax_close_userland);
34119+#endif
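Annotation: the two leaf functions above flip CR3 between the per-CPU kernel and user page directories using PCIDs, so the transition needs no TLB flush; each BUG_ON asserts that the PCID currently in the low bits of CR3 matches the direction of the switch. A standalone sketch of that CR3 composition and the invariant being checked — the PCID_* tags and the NOFLUSH bit position are assumptions mirroring the code above:

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK    (~0xfffULL)
#define PCID_KERNEL  0ULL           /* assumed kernel PCID tag */
#define PCID_USER    1ULL           /* assumed user PCID tag   */
#define PCID_NOFLUSH (1ULL << 63)   /* keep TLB entries for this PCID */

static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid)
{
        /* page-aligned pgd physical address, PCID in bits 0..11 */
        return (pgd_pa & PAGE_MASK) | pcid | PCID_NOFLUSH;
}

int main(void)
{
        uint64_t cr3 = make_cr3(0x1234000ULL, PCID_USER);

        /* __pax_close_userland's BUG_ON checks exactly this invariant
         * (hardware reads CR3 with bit 63 clear, so mask it here) */
        assert(((cr3 & ~PCID_NOFLUSH) & ~PAGE_MASK) == PCID_USER);
        return 0;
}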
34120diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34121index 0149575..f746de8 100644
34122--- a/arch/x86/net/bpf_jit.S
34123+++ b/arch/x86/net/bpf_jit.S
34124@@ -9,6 +9,7 @@
34125 */
34126 #include <linux/linkage.h>
34127 #include <asm/dwarf2.h>
34128+#include <asm/alternative-asm.h>
34129
34130 /*
34131 * Calling convention :
34132@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
34133 jle bpf_slow_path_word
34134 mov (SKBDATA,%rsi),%eax
34135 bswap %eax /* ntohl() */
34136+ pax_force_retaddr
34137 ret
34138
34139 sk_load_half:
34140@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
34141 jle bpf_slow_path_half
34142 movzwl (SKBDATA,%rsi),%eax
34143 rol $8,%ax # ntohs()
34144+ pax_force_retaddr
34145 ret
34146
34147 sk_load_byte:
34148@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
34149 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34150 jle bpf_slow_path_byte
34151 movzbl (SKBDATA,%rsi),%eax
34152+ pax_force_retaddr
34153 ret
34154
34155 /**
34156@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
34157 movzbl (SKBDATA,%rsi),%ebx
34158 and $15,%bl
34159 shl $2,%bl
34160+ pax_force_retaddr
34161 ret
34162
34163 /* rsi contains offset and can be scratched */
34164@@ -109,6 +114,7 @@ bpf_slow_path_word:
34165 js bpf_error
34166 mov -12(%rbp),%eax
34167 bswap %eax
34168+ pax_force_retaddr
34169 ret
34170
34171 bpf_slow_path_half:
34172@@ -117,12 +123,14 @@ bpf_slow_path_half:
34173 mov -12(%rbp),%ax
34174 rol $8,%ax
34175 movzwl %ax,%eax
34176+ pax_force_retaddr
34177 ret
34178
34179 bpf_slow_path_byte:
34180 bpf_slow_path_common(1)
34181 js bpf_error
34182 movzbl -12(%rbp),%eax
34183+ pax_force_retaddr
34184 ret
34185
34186 bpf_slow_path_byte_msh:
34187@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
34188 and $15,%al
34189 shl $2,%al
34190 xchg %eax,%ebx
34191+ pax_force_retaddr
34192 ret
34193
34194 #define sk_negative_common(SIZE) \
34195@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
34196 sk_negative_common(4)
34197 mov (%rax), %eax
34198 bswap %eax
34199+ pax_force_retaddr
34200 ret
34201
34202 bpf_slow_path_half_neg:
34203@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
34204 mov (%rax),%ax
34205 rol $8,%ax
34206 movzwl %ax,%eax
34207+ pax_force_retaddr
34208 ret
34209
34210 bpf_slow_path_byte_neg:
34211@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
34212 .globl sk_load_byte_negative_offset
34213 sk_negative_common(1)
34214 movzbl (%rax), %eax
34215+ pax_force_retaddr
34216 ret
34217
34218 bpf_slow_path_byte_msh_neg:
34219@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
34220 and $15,%al
34221 shl $2,%al
34222 xchg %eax,%ebx
34223+ pax_force_retaddr
34224 ret
34225
34226 bpf_error:
34227@@ -197,4 +210,5 @@ bpf_error:
34228 xor %eax,%eax
34229 mov -8(%rbp),%rbx
34230 leaveq
34231+ pax_force_retaddr
34232 ret
34233diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34234index af2d431..3cf24f0b 100644
34235--- a/arch/x86/net/bpf_jit_comp.c
34236+++ b/arch/x86/net/bpf_jit_comp.c
34237@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
34238 return ptr + len;
34239 }
34240
34241+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34242+#define MAX_INSTR_CODE_SIZE 96
34243+#else
34244+#define MAX_INSTR_CODE_SIZE 64
34245+#endif
34246+
34247 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
34248
34249 #define EMIT1(b1) EMIT(b1, 1)
34250 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
34251 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
34252 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
34253+
34254+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34255+/* original constant will appear in ecx */
34256+#define DILUTE_CONST_SEQUENCE(_off, _key) \
34257+do { \
34258+ /* mov ecx, randkey */ \
34259+ EMIT1(0xb9); \
34260+ EMIT(_key, 4); \
34261+ /* xor ecx, randkey ^ off */ \
34262+ EMIT2(0x81, 0xf1); \
34263+ EMIT((_key) ^ (_off), 4); \
34264+} while (0)
34265+
34266+#define EMIT1_off32(b1, _off) \
34267+do { \
34268+ switch (b1) { \
34269+ case 0x05: /* add eax, imm32 */ \
34270+ case 0x2d: /* sub eax, imm32 */ \
34271+ case 0x25: /* and eax, imm32 */ \
34272+ case 0x0d: /* or eax, imm32 */ \
34273+ case 0xb8: /* mov eax, imm32 */ \
34274+ case 0x35: /* xor eax, imm32 */ \
34275+ case 0x3d: /* cmp eax, imm32 */ \
34276+ case 0xa9: /* test eax, imm32 */ \
34277+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34278+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
34279+ break; \
34280+ case 0xbb: /* mov ebx, imm32 */ \
34281+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34282+ /* mov ebx, ecx */ \
34283+ EMIT2(0x89, 0xcb); \
34284+ break; \
34285+ case 0xbe: /* mov esi, imm32 */ \
34286+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34287+ /* mov esi, ecx */ \
34288+ EMIT2(0x89, 0xce); \
34289+ break; \
34290+ case 0xe8: /* call rel imm32, always to known funcs */ \
34291+ EMIT1(b1); \
34292+ EMIT(_off, 4); \
34293+ break; \
34294+ case 0xe9: /* jmp rel imm32 */ \
34295+ EMIT1(b1); \
34296+ EMIT(_off, 4); \
34297+ /* prevent fall-through, we're not called if off = 0 */ \
34298+ EMIT(0xcccccccc, 4); \
34299+ EMIT(0xcccccccc, 4); \
34300+ break; \
34301+ default: \
34302+ BUILD_BUG(); \
34303+ } \
34304+} while (0)
34305+
34306+#define EMIT2_off32(b1, b2, _off) \
34307+do { \
34308+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
34309+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
34310+ EMIT(randkey, 4); \
34311+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
34312+ EMIT((_off) - randkey, 4); \
34313+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
34314+ DILUTE_CONST_SEQUENCE(_off, randkey); \
34315+ /* imul eax, ecx */ \
34316+ EMIT3(0x0f, 0xaf, 0xc1); \
34317+ } else { \
34318+ BUILD_BUG(); \
34319+ } \
34320+} while (0)
34321+#else
34322 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
34323+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
34324+#endif
34325
34326 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
34327 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
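Annotation: DILUTE_CONST_SEQUENCE above is a constant-blinding primitive — the emitted instruction stream contains only randkey and randkey ^ off, never the attacker-influenced immediate itself, yet %ecx ends up holding the original value. The identity is easy to verify in isolation; a minimal sketch with dilute() modeling the two emitted instructions:

#include <stdint.h>
#include <stdio.h>

static uint32_t dilute(uint32_t off, uint32_t randkey)
{
        uint32_t ecx = randkey;     /* mov ecx, randkey        */
        ecx ^= randkey ^ off;       /* xor ecx, randkey ^ off  */
        return ecx;                 /* key ^ (key ^ off) == off */
}

int main(void)
{
        printf("%#x\n", dilute(0xdeadbeef, 0x12345678)); /* 0xdeadbeef */
        return 0;
}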
34328@@ -91,6 +168,24 @@ do { \
34329 #define X86_JBE 0x76
34330 #define X86_JA 0x77
34331
34332+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34333+#define APPEND_FLOW_VERIFY() \
34334+do { \
34335+ /* mov ecx, randkey */ \
34336+ EMIT1(0xb9); \
34337+ EMIT(randkey, 4); \
34338+ /* cmp ecx, randkey */ \
34339+ EMIT2(0x81, 0xf9); \
34340+ EMIT(randkey, 4); \
34341+ /* jz after 8 int 3s */ \
34342+ EMIT2(0x74, 0x08); \
34343+ EMIT(0xcccccccc, 4); \
34344+ EMIT(0xcccccccc, 4); \
34345+} while (0)
34346+#else
34347+#define APPEND_FLOW_VERIFY() do { } while (0)
34348+#endif
34349+
34350 #define EMIT_COND_JMP(op, offset) \
34351 do { \
34352 if (is_near(offset)) \
34353@@ -98,6 +193,7 @@ do { \
34354 else { \
34355 EMIT2(0x0f, op + 0x10); \
34356 EMIT(offset, 4); /* jxx .+off32 */ \
34357+ APPEND_FLOW_VERIFY(); \
34358 } \
34359 } while (0)
34360
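Annotation: APPEND_FLOW_VERIFY pads the far conditional-jump path. It re-emits the per-instruction random key as a mov/cmp pair followed by a jz whose displacement hops exactly over the eight int3 bytes: in-sequence execution always takes the jz (the key trivially matches), while execution entering mid-sequence, with %ecx not holding the key, falls into the int3 pad and traps. The emitted tail, byte for byte (a sketch for illustration):

/* bytes emitted after the cmp: jz .+8, then eight int3 (0xcc) */
static const unsigned char flow_verify_tail[10] = {
        0x74, 0x08,               /* jz over the pad     */
        0xcc, 0xcc, 0xcc, 0xcc,   /* int3 slide, 8 bytes */
        0xcc, 0xcc, 0xcc, 0xcc,
};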
34361@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
34362 return -1;
34363 }
34364
34365-struct bpf_binary_header {
34366- unsigned int pages;
34367- /* Note : for security reasons, bpf code will follow a randomly
34368- * sized amount of int3 instructions
34369- */
34370- u8 image[];
34371-};
34372-
34373-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34374+/* Note: for security reasons, bpf code will follow a randomly
34375+ * sized run of int3 instructions
34376+ */
34377+static u8 *bpf_alloc_binary(unsigned int proglen,
34378 u8 **image_ptr)
34379 {
34380 unsigned int sz, hole;
34381- struct bpf_binary_header *header;
34382+ u8 *header;
34383
34384 /* Most of BPF filters are really small,
34385 * but if some of them fill a page, allow at least
34386 * 128 extra bytes to insert a random section of int3
34387 */
34388- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34389- header = module_alloc(sz);
34390+ sz = round_up(proglen + 128, PAGE_SIZE);
34391+ header = module_alloc_exec(sz);
34392 if (!header)
34393 return NULL;
34394
34395+ pax_open_kernel();
34396 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34397+ pax_close_kernel();
34398
34399- header->pages = sz / PAGE_SIZE;
34400- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34401+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34402
34403 /* insert a random number of int3 instructions before BPF code */
34404- *image_ptr = &header->image[prandom_u32() % hole];
34405+ *image_ptr = &header[prandom_u32() % hole];
34406 return header;
34407 }
34408
34409 void bpf_jit_compile(struct sk_filter *fp)
34410 {
34411- u8 temp[64];
34412+ u8 temp[MAX_INSTR_CODE_SIZE];
34413 u8 *prog;
34414 unsigned int proglen, oldproglen = 0;
34415 int ilen, i;
34416 int t_offset, f_offset;
34417 u8 t_op, f_op, seen = 0, pass;
34418 u8 *image = NULL;
34419- struct bpf_binary_header *header = NULL;
34420+ u8 *header = NULL;
34421 u8 *func;
34422 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
34423 unsigned int cleanup_addr; /* epilogue code offset */
34424 unsigned int *addrs;
34425 const struct sock_filter *filter = fp->insns;
34426 int flen = fp->len;
34427+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34428+ unsigned int randkey;
34429+#endif
34430
34431 if (!bpf_jit_enable)
34432 return;
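Annotation: with struct bpf_binary_header gone, the rewritten bpf_alloc_binary hands back a bare int3-filled executable region and starts the program at a random offset inside the last page's slack, so no in-band metadata precedes the image and its exact start is unpredictable. A userspace sketch of the same placement arithmetic, assuming the sz/proglen relationship established above:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* region must be sz bytes, sz = round_up(proglen + 128, PAGE_SIZE) */
static uint8_t *place_prog(uint8_t *region, unsigned int sz,
                           unsigned int proglen)
{
        unsigned int hole = PAGE_SIZE - (proglen & (PAGE_SIZE - 1));

        memset(region, 0xcc, sz);        /* int3 everywhere  */
        return &region[rand() % hole];   /* randomized start */
}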
34433@@ -203,10 +298,10 @@ void bpf_jit_compile(struct sk_filter *fp)
34434 return;
34435
34436 /* Before first pass, make a rough estimation of addrs[]
34437- * each bpf instruction is translated to less than 64 bytes
34438+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
34439 */
34440 for (proglen = 0, i = 0; i < flen; i++) {
34441- proglen += 64;
34442+ proglen += MAX_INSTR_CODE_SIZE;
34443 addrs[i] = proglen;
34444 }
34445 cleanup_addr = proglen; /* epilogue address */
34446@@ -285,6 +380,10 @@ void bpf_jit_compile(struct sk_filter *fp)
34447 for (i = 0; i < flen; i++) {
34448 unsigned int K = filter[i].k;
34449
34450+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34451+ randkey = prandom_u32();
34452+#endif
34453+
34454 switch (filter[i].code) {
34455 case BPF_S_ALU_ADD_X: /* A += X; */
34456 seen |= SEEN_XREG;
34457@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
34458 case BPF_S_ALU_MUL_K: /* A *= K */
34459 if (is_imm8(K))
34460 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
34461- else {
34462- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
34463- EMIT(K, 4);
34464- }
34465+ else
34466+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
34467 break;
34468 case BPF_S_ALU_DIV_X: /* A /= X; */
34469 seen |= SEEN_XREG;
34470@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp)
34471 break;
34472 }
34473 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
34474+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34475+ DILUTE_CONST_SEQUENCE(K, randkey);
34476+#else
34477 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
34478+#endif
34479 EMIT2(0xf7, 0xf1); /* div %ecx */
34480 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
34481 break;
34482@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp)
34483 if (K == 1)
34484 break;
34485 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
34486+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
34487+ DILUTE_CONST_SEQUENCE(K, randkey);
34488+#else
34489 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
34490+#endif
34491 EMIT2(0xf7, 0xf1); /* div %ecx */
34492 break;
34493 case BPF_S_ALU_AND_X:
34494@@ -643,8 +748,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
34495 if (is_imm8(K)) {
34496 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
34497 } else {
34498- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
34499- EMIT(K, 4);
34500+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
34501 }
34502 } else {
34503 EMIT2(0x89,0xde); /* mov %ebx,%esi */
34504@@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
34505 if (unlikely(proglen + ilen > oldproglen)) {
34506 pr_err("bpb_jit_compile fatal error\n");
34507 kfree(addrs);
34508- module_free(NULL, header);
34509+ module_free_exec(NULL, image);
34510 return;
34511 }
34512+ pax_open_kernel();
34513 memcpy(image + proglen, temp, ilen);
34514+ pax_close_kernel();
34515 }
34516 proglen += ilen;
34517 addrs[i] = proglen;
34518@@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
34519
34520 if (image) {
34521 bpf_flush_icache(header, image + proglen);
34522- set_memory_ro((unsigned long)header, header->pages);
34523 fp->bpf_func = (void *)image;
34524 }
34525 out:
34526@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
34527 {
34528 struct sk_filter *fp = container_of(work, struct sk_filter, work);
34529 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34530- struct bpf_binary_header *header = (void *)addr;
34531
34532- set_memory_rw(addr, header->pages);
34533- module_free(NULL, header);
34534+ set_memory_rw(addr, 1);
34535+ module_free_exec(NULL, (void *)addr);
34536 kfree(fp);
34537 }
34538
34539diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34540index 5d04be5..2beeaa2 100644
34541--- a/arch/x86/oprofile/backtrace.c
34542+++ b/arch/x86/oprofile/backtrace.c
34543@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34544 struct stack_frame_ia32 *fp;
34545 unsigned long bytes;
34546
34547- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34548+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34549 if (bytes != 0)
34550 return NULL;
34551
34552- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34553+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34554
34555 oprofile_add_trace(bufhead[0].return_address);
34556
34557@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34558 struct stack_frame bufhead[2];
34559 unsigned long bytes;
34560
34561- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34562+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34563 if (bytes != 0)
34564 return NULL;
34565
34566@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34567 {
34568 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34569
34570- if (!user_mode_vm(regs)) {
34571+ if (!user_mode(regs)) {
34572 unsigned long stack = kernel_stack_pointer(regs);
34573 if (depth)
34574 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34575diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34576index 6890d84..1dad1f1 100644
34577--- a/arch/x86/oprofile/nmi_int.c
34578+++ b/arch/x86/oprofile/nmi_int.c
34579@@ -23,6 +23,7 @@
34580 #include <asm/nmi.h>
34581 #include <asm/msr.h>
34582 #include <asm/apic.h>
34583+#include <asm/pgtable.h>
34584
34585 #include "op_counter.h"
34586 #include "op_x86_model.h"
34587@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34588 if (ret)
34589 return ret;
34590
34591- if (!model->num_virt_counters)
34592- model->num_virt_counters = model->num_counters;
34593+ if (!model->num_virt_counters) {
34594+ pax_open_kernel();
34595+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34596+ pax_close_kernel();
34597+ }
34598
34599 mux_init(ops);
34600
34601diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34602index 50d86c0..7985318 100644
34603--- a/arch/x86/oprofile/op_model_amd.c
34604+++ b/arch/x86/oprofile/op_model_amd.c
34605@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34606 num_counters = AMD64_NUM_COUNTERS;
34607 }
34608
34609- op_amd_spec.num_counters = num_counters;
34610- op_amd_spec.num_controls = num_counters;
34611- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34612+ pax_open_kernel();
34613+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34614+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34615+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34616+ pax_close_kernel();
34617
34618 return 0;
34619 }
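Annotation: both oprofile hunks follow one pattern — op_x86_model_spec is constified (see the __do_const in the op_x86_model.h hunk below) and so lives in read-only memory, meaning the few legitimate one-time writes must cast away const inside a pax_open_kernel()/pax_close_kernel() write window. A rough sketch of the pattern with stubbed helpers; the struct is left writable here purely so the sketch runs:

struct op_spec {
        unsigned int num_counters;
        unsigned int num_virt_counters;
};

/* in the patch this object is const and its page write-protected */
static struct op_spec op_spec;

static void pax_open_kernel(void)  { /* stub: lift write protection */ }
static void pax_close_kernel(void) { /* stub: restore protection    */ }

static void op_late_init(unsigned int counters)
{
        pax_open_kernel();
        *(unsigned int *)&op_spec.num_counters      = counters;
        *(unsigned int *)&op_spec.num_virt_counters = counters;
        pax_close_kernel();
}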
34620diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34621index d90528e..0127e2b 100644
34622--- a/arch/x86/oprofile/op_model_ppro.c
34623+++ b/arch/x86/oprofile/op_model_ppro.c
34624@@ -19,6 +19,7 @@
34625 #include <asm/msr.h>
34626 #include <asm/apic.h>
34627 #include <asm/nmi.h>
34628+#include <asm/pgtable.h>
34629
34630 #include "op_x86_model.h"
34631 #include "op_counter.h"
34632@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34633
34634 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34635
34636- op_arch_perfmon_spec.num_counters = num_counters;
34637- op_arch_perfmon_spec.num_controls = num_counters;
34638+ pax_open_kernel();
34639+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34640+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34641+ pax_close_kernel();
34642 }
34643
34644 static int arch_perfmon_init(struct oprofile_operations *ignore)
34645diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34646index 71e8a67..6a313bb 100644
34647--- a/arch/x86/oprofile/op_x86_model.h
34648+++ b/arch/x86/oprofile/op_x86_model.h
34649@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34650 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34651 struct op_msrs const * const msrs);
34652 #endif
34653-};
34654+} __do_const;
34655
34656 struct op_counter_config;
34657
34658diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34659index 84b9d67..260e5ff 100644
34660--- a/arch/x86/pci/intel_mid_pci.c
34661+++ b/arch/x86/pci/intel_mid_pci.c
34662@@ -245,7 +245,7 @@ int __init intel_mid_pci_init(void)
34663 pr_info("Intel MID platform detected, using MID PCI ops\n");
34664 pci_mmcfg_late_init();
34665 pcibios_enable_irq = intel_mid_pci_irq_enable;
34666- pci_root_ops = intel_mid_pci_ops;
34667+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34668 pci_soc_mode = 1;
34669 /* Continue with standard init */
34670 return 1;
34671diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34672index 372e9b8..e775a6c 100644
34673--- a/arch/x86/pci/irq.c
34674+++ b/arch/x86/pci/irq.c
34675@@ -50,7 +50,7 @@ struct irq_router {
34676 struct irq_router_handler {
34677 u16 vendor;
34678 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34679-};
34680+} __do_const;
34681
34682 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34683 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
34684@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34685 return 0;
34686 }
34687
34688-static __initdata struct irq_router_handler pirq_routers[] = {
34689+static __initconst const struct irq_router_handler pirq_routers[] = {
34690 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34691 { PCI_VENDOR_ID_AL, ali_router_probe },
34692 { PCI_VENDOR_ID_ITE, ite_router_probe },
34693@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
34694 static void __init pirq_find_router(struct irq_router *r)
34695 {
34696 struct irq_routing_table *rt = pirq_table;
34697- struct irq_router_handler *h;
34698+ const struct irq_router_handler *h;
34699
34700 #ifdef CONFIG_PCI_BIOS
34701 if (!rt->signature) {
34702@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34703 return 0;
34704 }
34705
34706-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34707+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34708 {
34709 .callback = fix_broken_hp_bios_irq9,
34710 .ident = "HP Pavilion N5400 Series Laptop",
34711diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34712index c77b24a..c979855 100644
34713--- a/arch/x86/pci/pcbios.c
34714+++ b/arch/x86/pci/pcbios.c
34715@@ -79,7 +79,7 @@ union bios32 {
34716 static struct {
34717 unsigned long address;
34718 unsigned short segment;
34719-} bios32_indirect = { 0, __KERNEL_CS };
34720+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
34721
34722 /*
34723 * Returns the entry point for the given service, NULL on error
34724@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
34725 unsigned long length; /* %ecx */
34726 unsigned long entry; /* %edx */
34727 unsigned long flags;
34728+ struct desc_struct d, *gdt;
34729
34730 local_irq_save(flags);
34731- __asm__("lcall *(%%edi); cld"
34732+
34733+ gdt = get_cpu_gdt_table(smp_processor_id());
34734+
34735+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34736+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34737+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34738+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34739+
34740+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34741 : "=a" (return_code),
34742 "=b" (address),
34743 "=c" (length),
34744 "=d" (entry)
34745 : "0" (service),
34746 "1" (0),
34747- "D" (&bios32_indirect));
34748+ "D" (&bios32_indirect),
34749+ "r"(__PCIBIOS_DS)
34750+ : "memory");
34751+
34752+ pax_open_kernel();
34753+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34754+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34755+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34756+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34757+ pax_close_kernel();
34758+
34759 local_irq_restore(flags);
34760
34761 switch (return_code) {
34762- case 0:
34763- return address + entry;
34764- case 0x80: /* Not present */
34765- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34766- return 0;
34767- default: /* Shouldn't happen */
34768- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34769- service, return_code);
34770+ case 0: {
34771+ int cpu;
34772+ unsigned char flags;
34773+
34774+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34775+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34776+ printk(KERN_WARNING "bios32_service: not valid\n");
34777 return 0;
34778+ }
34779+ address = address + PAGE_OFFSET;
34780+ length += 16UL; /* some BIOSs underreport this... */
34781+ flags = 4;
34782+ if (length >= 64*1024*1024) {
34783+ length >>= PAGE_SHIFT;
34784+ flags |= 8;
34785+ }
34786+
34787+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34788+ gdt = get_cpu_gdt_table(cpu);
34789+ pack_descriptor(&d, address, length, 0x9b, flags);
34790+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34791+ pack_descriptor(&d, address, length, 0x93, flags);
34792+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34793+ }
34794+ return entry;
34795+ }
34796+ case 0x80: /* Not present */
34797+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34798+ return 0;
34799+ default: /* Shouldn't happen */
34800+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34801+ service, return_code);
34802+ return 0;
34803 }
34804 }
34805
34806 static struct {
34807 unsigned long address;
34808 unsigned short segment;
34809-} pci_indirect = { 0, __KERNEL_CS };
34810+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34811
34812-static int pci_bios_present;
34813+static int pci_bios_present __read_only;
34814
34815 static int check_pcibios(void)
34816 {
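Annotation: the rewritten success path above no longer trusts the BIOS32 directory blindly — the reported base must sit below 0xFFFF0, the service must fit within the first megabyte, and the entry offset must fall inside the reported length, before per-CPU __PCIBIOS_CS/__PCIBIOS_DS descriptors are built for it. The same conditions, extracted as a standalone predicate (a sketch):

static int bios32_service_valid(unsigned long address,
                                unsigned long length,
                                unsigned long entry)
{
        if (address >= 0xFFFF0)            /* base below 1 MiB - 16    */
                return 0;
        if (length > 0x100000 - address)   /* must fit below 1 MiB     */
                return 0;
        if (length <= entry)               /* entry inside the service */
                return 0;
        return 1;
}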
34817@@ -131,11 +174,13 @@ static int check_pcibios(void)
34818 unsigned long flags, pcibios_entry;
34819
34820 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34821- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34822+ pci_indirect.address = pcibios_entry;
34823
34824 local_irq_save(flags);
34825- __asm__(
34826- "lcall *(%%edi); cld\n\t"
34827+ __asm__("movw %w6, %%ds\n\t"
34828+ "lcall *%%ss:(%%edi); cld\n\t"
34829+ "push %%ss\n\t"
34830+ "pop %%ds\n\t"
34831 "jc 1f\n\t"
34832 "xor %%ah, %%ah\n"
34833 "1:"
34834@@ -144,7 +189,8 @@ static int check_pcibios(void)
34835 "=b" (ebx),
34836 "=c" (ecx)
34837 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34838- "D" (&pci_indirect)
34839+ "D" (&pci_indirect),
34840+ "r" (__PCIBIOS_DS)
34841 : "memory");
34842 local_irq_restore(flags);
34843
34844@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34845
34846 switch (len) {
34847 case 1:
34848- __asm__("lcall *(%%esi); cld\n\t"
34849+ __asm__("movw %w6, %%ds\n\t"
34850+ "lcall *%%ss:(%%esi); cld\n\t"
34851+ "push %%ss\n\t"
34852+ "pop %%ds\n\t"
34853 "jc 1f\n\t"
34854 "xor %%ah, %%ah\n"
34855 "1:"
34856@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34857 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34858 "b" (bx),
34859 "D" ((long)reg),
34860- "S" (&pci_indirect));
34861+ "S" (&pci_indirect),
34862+ "r" (__PCIBIOS_DS));
34863 /*
34864 * Zero-extend the result beyond 8 bits, do not trust the
34865 * BIOS having done it:
34866@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34867 *value &= 0xff;
34868 break;
34869 case 2:
34870- __asm__("lcall *(%%esi); cld\n\t"
34871+ __asm__("movw %w6, %%ds\n\t"
34872+ "lcall *%%ss:(%%esi); cld\n\t"
34873+ "push %%ss\n\t"
34874+ "pop %%ds\n\t"
34875 "jc 1f\n\t"
34876 "xor %%ah, %%ah\n"
34877 "1:"
34878@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34879 : "1" (PCIBIOS_READ_CONFIG_WORD),
34880 "b" (bx),
34881 "D" ((long)reg),
34882- "S" (&pci_indirect));
34883+ "S" (&pci_indirect),
34884+ "r" (__PCIBIOS_DS));
34885 /*
34886 * Zero-extend the result beyond 16 bits, do not trust the
34887 * BIOS having done it:
34888@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34889 *value &= 0xffff;
34890 break;
34891 case 4:
34892- __asm__("lcall *(%%esi); cld\n\t"
34893+ __asm__("movw %w6, %%ds\n\t"
34894+ "lcall *%%ss:(%%esi); cld\n\t"
34895+ "push %%ss\n\t"
34896+ "pop %%ds\n\t"
34897 "jc 1f\n\t"
34898 "xor %%ah, %%ah\n"
34899 "1:"
34900@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34901 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34902 "b" (bx),
34903 "D" ((long)reg),
34904- "S" (&pci_indirect));
34905+ "S" (&pci_indirect),
34906+ "r" (__PCIBIOS_DS));
34907 break;
34908 }
34909
34910@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34911
34912 switch (len) {
34913 case 1:
34914- __asm__("lcall *(%%esi); cld\n\t"
34915+ __asm__("movw %w6, %%ds\n\t"
34916+ "lcall *%%ss:(%%esi); cld\n\t"
34917+ "push %%ss\n\t"
34918+ "pop %%ds\n\t"
34919 "jc 1f\n\t"
34920 "xor %%ah, %%ah\n"
34921 "1:"
34922@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34923 "c" (value),
34924 "b" (bx),
34925 "D" ((long)reg),
34926- "S" (&pci_indirect));
34927+ "S" (&pci_indirect),
34928+ "r" (__PCIBIOS_DS));
34929 break;
34930 case 2:
34931- __asm__("lcall *(%%esi); cld\n\t"
34932+ __asm__("movw %w6, %%ds\n\t"
34933+ "lcall *%%ss:(%%esi); cld\n\t"
34934+ "push %%ss\n\t"
34935+ "pop %%ds\n\t"
34936 "jc 1f\n\t"
34937 "xor %%ah, %%ah\n"
34938 "1:"
34939@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34940 "c" (value),
34941 "b" (bx),
34942 "D" ((long)reg),
34943- "S" (&pci_indirect));
34944+ "S" (&pci_indirect),
34945+ "r" (__PCIBIOS_DS));
34946 break;
34947 case 4:
34948- __asm__("lcall *(%%esi); cld\n\t"
34949+ __asm__("movw %w6, %%ds\n\t"
34950+ "lcall *%%ss:(%%esi); cld\n\t"
34951+ "push %%ss\n\t"
34952+ "pop %%ds\n\t"
34953 "jc 1f\n\t"
34954 "xor %%ah, %%ah\n"
34955 "1:"
34956@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34957 "c" (value),
34958 "b" (bx),
34959 "D" ((long)reg),
34960- "S" (&pci_indirect));
34961+ "S" (&pci_indirect),
34962+ "r" (__PCIBIOS_DS));
34963 break;
34964 }
34965
34966@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34967
34968 DBG("PCI: Fetching IRQ routing table... ");
34969 __asm__("push %%es\n\t"
34970+ "movw %w8, %%ds\n\t"
34971 "push %%ds\n\t"
34972 "pop %%es\n\t"
34973- "lcall *(%%esi); cld\n\t"
34974+ "lcall *%%ss:(%%esi); cld\n\t"
34975 "pop %%es\n\t"
34976+ "push %%ss\n\t"
34977+ "pop %%ds\n"
34978 "jc 1f\n\t"
34979 "xor %%ah, %%ah\n"
34980 "1:"
34981@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34982 "1" (0),
34983 "D" ((long) &opt),
34984 "S" (&pci_indirect),
34985- "m" (opt)
34986+ "m" (opt),
34987+ "r" (__PCIBIOS_DS)
34988 : "memory");
34989 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34990 if (ret & 0xff00)
34991@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34992 {
34993 int ret;
34994
34995- __asm__("lcall *(%%esi); cld\n\t"
34996+ __asm__("movw %w5, %%ds\n\t"
34997+ "lcall *%%ss:(%%esi); cld\n\t"
34998+ "push %%ss\n\t"
34999+ "pop %%ds\n"
35000 "jc 1f\n\t"
35001 "xor %%ah, %%ah\n"
35002 "1:"
35003@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35004 : "0" (PCIBIOS_SET_PCI_HW_INT),
35005 "b" ((dev->bus->number << 8) | dev->devfn),
35006 "c" ((irq << 8) | (pin + 10)),
35007- "S" (&pci_indirect));
35008+ "S" (&pci_indirect),
35009+ "r" (__PCIBIOS_DS));
35010 return !(ret & 0xff00);
35011 }
35012 EXPORT_SYMBOL(pcibios_set_irq_routing);
35013diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
35014index 9ee3491..872192f 100644
35015--- a/arch/x86/platform/efi/efi_32.c
35016+++ b/arch/x86/platform/efi/efi_32.c
35017@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
35018 {
35019 struct desc_ptr gdt_descr;
35020
35021+#ifdef CONFIG_PAX_KERNEXEC
35022+ struct desc_struct d;
35023+#endif
35024+
35025 local_irq_save(efi_rt_eflags);
35026
35027 load_cr3(initial_page_table);
35028 __flush_tlb_all();
35029
35030+#ifdef CONFIG_PAX_KERNEXEC
35031+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
35032+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35033+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
35034+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35035+#endif
35036+
35037 gdt_descr.address = __pa(get_cpu_gdt_table(0));
35038 gdt_descr.size = GDT_SIZE - 1;
35039 load_gdt(&gdt_descr);
35040@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
35041 {
35042 struct desc_ptr gdt_descr;
35043
35044+#ifdef CONFIG_PAX_KERNEXEC
35045+ struct desc_struct d;
35046+
35047+ memset(&d, 0, sizeof d);
35048+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35049+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35050+#endif
35051+
35052 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
35053 gdt_descr.size = GDT_SIZE - 1;
35054 load_gdt(&gdt_descr);
35055
35056+#ifdef CONFIG_PAX_PER_CPU_PGD
35057+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35058+#else
35059 load_cr3(swapper_pg_dir);
35060+#endif
35061+
35062 __flush_tlb_all();
35063
35064 local_irq_restore(efi_rt_eflags);
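Annotation: under KERNEXEC the kernel code segment is not flat, so the EFI prelog above installs temporary flat 4 GiB descriptors (base 0, limit 0xFFFFF with 4 KiB granularity) for the physical-mode call, and the epilog zeroes them again. The access bytes 0x9B and 0x93 differ only in the code/data bit; a small sketch decoding them:

#include <stdio.h>

static void decode_access(unsigned char a)
{
        printf("present=%d dpl=%d code=%d\n",
               (a >> 7) & 1,    /* P bit            */
               (a >> 5) & 3,    /* privilege level  */
               (a >> 3) & 1);   /* 1 = code segment */
}

int main(void)
{
        decode_access(0x9b);    /* present=1 dpl=0 code=1 */
        decode_access(0x93);    /* present=1 dpl=0 code=0 */
        return 0;
}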
35065diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
35066index 666b74a..673d88f 100644
35067--- a/arch/x86/platform/efi/efi_64.c
35068+++ b/arch/x86/platform/efi/efi_64.c
35069@@ -97,6 +97,11 @@ void __init efi_call_phys_prelog(void)
35070 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
35071 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
35072 }
35073+
35074+#ifdef CONFIG_PAX_PER_CPU_PGD
35075+ load_cr3(swapper_pg_dir);
35076+#endif
35077+
35078 __flush_tlb_all();
35079 }
35080
35081@@ -114,6 +119,11 @@ void __init efi_call_phys_epilog(void)
35082 for (pgd = 0; pgd < n_pgds; pgd++)
35083 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
35084 kfree(save_pgd);
35085+
35086+#ifdef CONFIG_PAX_PER_CPU_PGD
35087+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35088+#endif
35089+
35090 __flush_tlb_all();
35091 local_irq_restore(efi_flags);
35092 early_code_mapping_set_exec(0);
35093diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
35094index fbe66e6..eae5e38 100644
35095--- a/arch/x86/platform/efi/efi_stub_32.S
35096+++ b/arch/x86/platform/efi/efi_stub_32.S
35097@@ -6,7 +6,9 @@
35098 */
35099
35100 #include <linux/linkage.h>
35101+#include <linux/init.h>
35102 #include <asm/page_types.h>
35103+#include <asm/segment.h>
35104
35105 /*
35106 * efi_call_phys(void *, ...) is a function with variable parameters.
35107@@ -20,7 +22,7 @@
35108 * service functions will comply with gcc calling convention, too.
35109 */
35110
35111-.text
35112+__INIT
35113 ENTRY(efi_call_phys)
35114 /*
35115 * 0. The function can only be called in Linux kernel. So CS has been
35116@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
35117 * The mapping of lower virtual memory has been created in prelog and
35118 * epilog.
35119 */
35120- movl $1f, %edx
35121- subl $__PAGE_OFFSET, %edx
35122- jmp *%edx
35123+#ifdef CONFIG_PAX_KERNEXEC
35124+ movl $(__KERNEXEC_EFI_DS), %edx
35125+ mov %edx, %ds
35126+ mov %edx, %es
35127+ mov %edx, %ss
35128+ addl $2f,(1f)
35129+ ljmp *(1f)
35130+
35131+__INITDATA
35132+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
35133+.previous
35134+
35135+2:
35136+ subl $2b,(1b)
35137+#else
35138+ jmp 1f-__PAGE_OFFSET
35139 1:
35140+#endif
35141
35142 /*
35143 * 2. Now on the top of stack is the return
35144@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
35145 * parameter 2, ..., param n. To make things easy, we save the return
35146 * address of efi_call_phys in a global variable.
35147 */
35148- popl %edx
35149- movl %edx, saved_return_addr
35150- /* get the function pointer into ECX*/
35151- popl %ecx
35152- movl %ecx, efi_rt_function_ptr
35153- movl $2f, %edx
35154- subl $__PAGE_OFFSET, %edx
35155- pushl %edx
35156+ popl (saved_return_addr)
35157+ popl (efi_rt_function_ptr)
35158
35159 /*
35160 * 3. Clear PG bit in %CR0.
35161@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35162 /*
35163 * 5. Call the physical function.
35164 */
35165- jmp *%ecx
35166+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35167
35168-2:
35169 /*
35170 * 6. After EFI runtime service returns, control will return to
35171 * following instruction. We'd better readjust stack pointer first.
35172@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35173 movl %cr0, %edx
35174 orl $0x80000000, %edx
35175 movl %edx, %cr0
35176- jmp 1f
35177-1:
35178+
35179 /*
35180 * 8. Now restore the virtual mode from flat mode by
35181 * adding EIP with PAGE_OFFSET.
35182 */
35183- movl $1f, %edx
35184- jmp *%edx
35185+#ifdef CONFIG_PAX_KERNEXEC
35186+ movl $(__KERNEL_DS), %edx
35187+ mov %edx, %ds
35188+ mov %edx, %es
35189+ mov %edx, %ss
35190+ ljmp $(__KERNEL_CS),$1f
35191+#else
35192+ jmp 1f+__PAGE_OFFSET
35193+#endif
35194 1:
35195
35196 /*
35197 * 9. Balance the stack. And because EAX contain the return value,
35198 * we'd better not clobber it.
35199 */
35200- leal efi_rt_function_ptr, %edx
35201- movl (%edx), %ecx
35202- pushl %ecx
35203+ pushl (efi_rt_function_ptr)
35204
35205 /*
35206- * 10. Push the saved return address onto the stack and return.
35207+ * 10. Return to the saved return address.
35208 */
35209- leal saved_return_addr, %edx
35210- movl (%edx), %ecx
35211- pushl %ecx
35212- ret
35213+ jmpl *(saved_return_addr)
35214 ENDPROC(efi_call_phys)
35215 .previous
35216
35217-.data
35218+__INITDATA
35219 saved_return_addr:
35220 .long 0
35221 efi_rt_function_ptr:
35222diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35223index 88073b1..1cc2f53 100644
35224--- a/arch/x86/platform/efi/efi_stub_64.S
35225+++ b/arch/x86/platform/efi/efi_stub_64.S
35226@@ -7,6 +7,7 @@
35227 */
35228
35229 #include <linux/linkage.h>
35230+#include <asm/alternative-asm.h>
35231
35232 #define SAVE_XMM \
35233 mov %rsp, %rax; \
35234@@ -77,6 +78,7 @@ ENTRY(efi_call0)
35235 RESTORE_PGT
35236 addq $32, %rsp
35237 RESTORE_XMM
35238+ pax_force_retaddr 0, 1
35239 ret
35240 ENDPROC(efi_call0)
35241
35242@@ -89,6 +91,7 @@ ENTRY(efi_call1)
35243 RESTORE_PGT
35244 addq $32, %rsp
35245 RESTORE_XMM
35246+ pax_force_retaddr 0, 1
35247 ret
35248 ENDPROC(efi_call1)
35249
35250@@ -101,6 +104,7 @@ ENTRY(efi_call2)
35251 RESTORE_PGT
35252 addq $32, %rsp
35253 RESTORE_XMM
35254+ pax_force_retaddr 0, 1
35255 ret
35256 ENDPROC(efi_call2)
35257
35258@@ -114,6 +118,7 @@ ENTRY(efi_call3)
35259 RESTORE_PGT
35260 addq $32, %rsp
35261 RESTORE_XMM
35262+ pax_force_retaddr 0, 1
35263 ret
35264 ENDPROC(efi_call3)
35265
35266@@ -128,6 +133,7 @@ ENTRY(efi_call4)
35267 RESTORE_PGT
35268 addq $32, %rsp
35269 RESTORE_XMM
35270+ pax_force_retaddr 0, 1
35271 ret
35272 ENDPROC(efi_call4)
35273
35274@@ -143,6 +149,7 @@ ENTRY(efi_call5)
35275 RESTORE_PGT
35276 addq $48, %rsp
35277 RESTORE_XMM
35278+ pax_force_retaddr 0, 1
35279 ret
35280 ENDPROC(efi_call5)
35281
35282@@ -161,6 +168,7 @@ ENTRY(efi_call6)
35283 RESTORE_PGT
35284 addq $48, %rsp
35285 RESTORE_XMM
35286+ pax_force_retaddr 0, 1
35287 ret
35288 ENDPROC(efi_call6)
35289
35290diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35291index 1bbedc4..eb795b5 100644
35292--- a/arch/x86/platform/intel-mid/intel-mid.c
35293+++ b/arch/x86/platform/intel-mid/intel-mid.c
35294@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35295 {
35296 };
35297
35298-static void intel_mid_reboot(void)
35299+static void __noreturn intel_mid_reboot(void)
35300 {
35301 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35302+ BUG();
35303 }
35304
35305 static unsigned long __init intel_mid_calibrate_tsc(void)
35306diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35307index d6ee929..3637cb5 100644
35308--- a/arch/x86/platform/olpc/olpc_dt.c
35309+++ b/arch/x86/platform/olpc/olpc_dt.c
35310@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35311 return res;
35312 }
35313
35314-static struct of_pdt_ops prom_olpc_ops __initdata = {
35315+static struct of_pdt_ops prom_olpc_ops __initconst = {
35316 .nextprop = olpc_dt_nextprop,
35317 .getproplen = olpc_dt_getproplen,
35318 .getproperty = olpc_dt_getproperty,
35319diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35320index 424f4c9..f2a2988 100644
35321--- a/arch/x86/power/cpu.c
35322+++ b/arch/x86/power/cpu.c
35323@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35324 static void fix_processor_context(void)
35325 {
35326 int cpu = smp_processor_id();
35327- struct tss_struct *t = &per_cpu(init_tss, cpu);
35328-#ifdef CONFIG_X86_64
35329- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35330- tss_desc tss;
35331-#endif
35332+ struct tss_struct *t = init_tss + cpu;
35333+
35334 set_tss_desc(cpu, t); /*
35335 * This just modifies memory; should not be
35336 * necessary. But... This is necessary, because
35337@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35338 */
35339
35340 #ifdef CONFIG_X86_64
35341- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35342- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35343- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35344-
35345 syscall_init(); /* This sets MSR_*STAR and related */
35346 #endif
35347 load_TR_desc(); /* This does ltr */
35348diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35349index bad628a..a102610 100644
35350--- a/arch/x86/realmode/init.c
35351+++ b/arch/x86/realmode/init.c
35352@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35353 __va(real_mode_header->trampoline_header);
35354
35355 #ifdef CONFIG_X86_32
35356- trampoline_header->start = __pa_symbol(startup_32_smp);
35357+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35358+
35359+#ifdef CONFIG_PAX_KERNEXEC
35360+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35361+#endif
35362+
35363+ trampoline_header->boot_cs = __BOOT_CS;
35364 trampoline_header->gdt_limit = __BOOT_DS + 7;
35365 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35366 #else
35367@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35368 *trampoline_cr4_features = read_cr4();
35369
35370 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35371- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35372+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35373 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35374 #endif
35375 }
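Annotation: the trampoline PGD copies the kernel's direct-map entry, but that entry normally carries the NX bit; since the real-mode trampoline executes through this mapping, bit 63 is stripped. The operation as a one-liner (sketch):

#include <stdint.h>

#define _PAGE_NX (1ULL << 63)   /* NX bit in a 64-bit page-table entry */

static uint64_t trampoline_pgd_entry(uint64_t kernel_pgd_entry)
{
        return kernel_pgd_entry & ~_PAGE_NX;  /* keep mapping, allow exec */
}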
35376diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35377index 3497f14..cc73b92 100644
35378--- a/arch/x86/realmode/rm/Makefile
35379+++ b/arch/x86/realmode/rm/Makefile
35380@@ -66,5 +66,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35381
35382 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35383 -I$(srctree)/arch/x86/boot
35384+ifdef CONSTIFY_PLUGIN
35385+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35386+endif
35387 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35388 GCOV_PROFILE := n
35389diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35390index a28221d..93c40f1 100644
35391--- a/arch/x86/realmode/rm/header.S
35392+++ b/arch/x86/realmode/rm/header.S
35393@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35394 #endif
35395 /* APM/BIOS reboot */
35396 .long pa_machine_real_restart_asm
35397-#ifdef CONFIG_X86_64
35398+#ifdef CONFIG_X86_32
35399+ .long __KERNEL_CS
35400+#else
35401 .long __KERNEL32_CS
35402 #endif
35403 END(real_mode_header)
35404diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35405index 48ddd76..c26749f 100644
35406--- a/arch/x86/realmode/rm/trampoline_32.S
35407+++ b/arch/x86/realmode/rm/trampoline_32.S
35408@@ -24,6 +24,12 @@
35409 #include <asm/page_types.h>
35410 #include "realmode.h"
35411
35412+#ifdef CONFIG_PAX_KERNEXEC
35413+#define ta(X) (X)
35414+#else
35415+#define ta(X) (pa_ ## X)
35416+#endif
35417+
35418 .text
35419 .code16
35420
35421@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35422
35423 cli # We should be safe anyway
35424
35425- movl tr_start, %eax # where we need to go
35426-
35427 movl $0xA5A5A5A5, trampoline_status
35428 # write marker for master knows we're running
35429
35430@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35431 movw $1, %dx # protected mode (PE) bit
35432 lmsw %dx # into protected mode
35433
35434- ljmpl $__BOOT_CS, $pa_startup_32
35435+ ljmpl *(trampoline_header)
35436
35437 .section ".text32","ax"
35438 .code32
35439@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35440 .balign 8
35441 GLOBAL(trampoline_header)
35442 tr_start: .space 4
35443- tr_gdt_pad: .space 2
35444+ tr_boot_cs: .space 2
35445 tr_gdt: .space 6
35446 END(trampoline_header)
35447
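Annotation: the ta() helper above selects between the raw symbol (under KERNEXEC) and its pa_-prefixed physical-address alias (otherwise) via token pasting. A compile-time sketch of the mechanism, with made-up symbol values:

#include <stdio.h>

#define pa_demo_sym 0x00001000u   /* hypothetical physical alias  */
#define demo_sym    0xc0001000u   /* hypothetical virtual address */

#ifdef CONFIG_PAX_KERNEXEC
#define ta(X) (X)          /* use the symbol directly      */
#else
#define ta(X) (pa_ ## X)   /* paste the pa_ physical alias */
#endif

int main(void)
{
        printf("%#x\n", ta(demo_sym));  /* 0x1000 unless KERNEXEC */
        return 0;
}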
35448diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35449index dac7b20..72dbaca 100644
35450--- a/arch/x86/realmode/rm/trampoline_64.S
35451+++ b/arch/x86/realmode/rm/trampoline_64.S
35452@@ -93,6 +93,7 @@ ENTRY(startup_32)
35453 movl %edx, %gs
35454
35455 movl pa_tr_cr4, %eax
35456+ andl $~X86_CR4_PCIDE, %eax
35457 movl %eax, %cr4 # Enable PAE mode
35458
35459 # Setup trampoline 4 level pagetables
35460@@ -106,7 +107,7 @@ ENTRY(startup_32)
35461 wrmsr
35462
35463 # Enable paging and in turn activate Long Mode
35464- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35465+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35466 movl %eax, %cr0
35467
35468 /*
35469diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35470index 9e7e147..25a4158 100644
35471--- a/arch/x86/realmode/rm/wakeup_asm.S
35472+++ b/arch/x86/realmode/rm/wakeup_asm.S
35473@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35474 lgdtl pmode_gdt
35475
35476 /* This really couldn't... */
35477- movl pmode_entry, %eax
35478 movl pmode_cr0, %ecx
35479 movl %ecx, %cr0
35480- ljmpl $__KERNEL_CS, $pa_startup_32
35481- /* -> jmp *%eax in trampoline_32.S */
35482+
35483+ ljmpl *pmode_entry
35484 #else
35485 jmp trampoline_start
35486 #endif
35487diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35488index e812034..c747134 100644
35489--- a/arch/x86/tools/Makefile
35490+++ b/arch/x86/tools/Makefile
35491@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35492
35493 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35494
35495-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35496+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35497 hostprogs-y += relocs
35498 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35499 relocs: $(obj)/relocs
35500diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35501index cfbdbdb..1aa763c 100644
35502--- a/arch/x86/tools/relocs.c
35503+++ b/arch/x86/tools/relocs.c
35504@@ -1,5 +1,7 @@
35505 /* This is included from relocs_32/64.c */
35506
35507+#include "../../../include/generated/autoconf.h"
35508+
35509 #define ElfW(type) _ElfW(ELF_BITS, type)
35510 #define _ElfW(bits, type) __ElfW(bits, type)
35511 #define __ElfW(bits, type) Elf##bits##_##type
35512@@ -11,6 +13,7 @@
35513 #define Elf_Sym ElfW(Sym)
35514
35515 static Elf_Ehdr ehdr;
35516+static Elf_Phdr *phdr;
35517
35518 struct relocs {
35519 uint32_t *offset;
35520@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
35521 }
35522 }
35523
35524+static void read_phdrs(FILE *fp)
35525+{
35526+ unsigned int i;
35527+
35528+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35529+ if (!phdr) {
35530+ die("Unable to allocate %d program headers\n",
35531+ ehdr.e_phnum);
35532+ }
35533+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35534+ die("Seek to %d failed: %s\n",
35535+ ehdr.e_phoff, strerror(errno));
35536+ }
35537+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35538+ die("Cannot read ELF program headers: %s\n",
35539+ strerror(errno));
35540+ }
35541+ for(i = 0; i < ehdr.e_phnum; i++) {
35542+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35543+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35544+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35545+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35546+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35547+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35548+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35549+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35550+ }
35551+
35552+}
35553+
35554 static void read_shdrs(FILE *fp)
35555 {
35556- int i;
35557+ unsigned int i;
35558 Elf_Shdr shdr;
35559
35560 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35561@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
35562
35563 static void read_strtabs(FILE *fp)
35564 {
35565- int i;
35566+ unsigned int i;
35567 for (i = 0; i < ehdr.e_shnum; i++) {
35568 struct section *sec = &secs[i];
35569 if (sec->shdr.sh_type != SHT_STRTAB) {
35570@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
35571
35572 static void read_symtabs(FILE *fp)
35573 {
35574- int i,j;
35575+ unsigned int i,j;
35576 for (i = 0; i < ehdr.e_shnum; i++) {
35577 struct section *sec = &secs[i];
35578 if (sec->shdr.sh_type != SHT_SYMTAB) {
35579@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
35580 }
35581
35582
35583-static void read_relocs(FILE *fp)
35584+static void read_relocs(FILE *fp, int use_real_mode)
35585 {
35586- int i,j;
35587+ unsigned int i,j;
35588+ uint32_t base;
35589+
35590 for (i = 0; i < ehdr.e_shnum; i++) {
35591 struct section *sec = &secs[i];
35592 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35593@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
35594 die("Cannot read symbol table: %s\n",
35595 strerror(errno));
35596 }
35597+ base = 0;
35598+
35599+#ifdef CONFIG_X86_32
35600+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35601+ if (phdr[j].p_type != PT_LOAD )
35602+ continue;
35603+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35604+ continue;
35605+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35606+ break;
35607+ }
35608+#endif
35609+
35610 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35611 Elf_Rel *rel = &sec->reltab[j];
35612- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35613+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35614 rel->r_info = elf_xword_to_cpu(rel->r_info);
35615 #if (SHT_REL_TYPE == SHT_RELA)
35616 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
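Annotation: read_relocs now rebases 32-bit relocation offsets — when not processing the realmode image, a section found inside a PT_LOAD segment has its link-time r_offset shifted by PAGE_OFFSET plus that segment's physical/virtual delta, so later matching works on the kernel's runtime addresses. The lookup restated as a standalone helper (a sketch; the CONFIG_PAGE_OFFSET value is the common i386 default):

#include <stdint.h>

#define CONFIG_PAGE_OFFSET 0xc0000000u   /* typical i386 split */

static uint32_t reloc_base(uint32_t sh_offset,  /* section file offset */
                           uint32_t p_offset, uint32_t p_filesz,
                           uint32_t p_paddr, uint32_t p_vaddr)
{
        /* section not covered by this PT_LOAD segment: no rebasing */
        if (sh_offset < p_offset || sh_offset >= p_offset + p_filesz)
                return 0;
        return CONFIG_PAGE_OFFSET + p_paddr - p_vaddr;
}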
35617@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
35618
35619 static void print_absolute_symbols(void)
35620 {
35621- int i;
35622+ unsigned int i;
35623 const char *format;
35624
35625 if (ELF_BITS == 64)
35626@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
35627 for (i = 0; i < ehdr.e_shnum; i++) {
35628 struct section *sec = &secs[i];
35629 char *sym_strtab;
35630- int j;
35631+ unsigned int j;
35632
35633 if (sec->shdr.sh_type != SHT_SYMTAB) {
35634 continue;
35635@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
35636
35637 static void print_absolute_relocs(void)
35638 {
35639- int i, printed = 0;
35640+ unsigned int i, printed = 0;
35641 const char *format;
35642
35643 if (ELF_BITS == 64)
35644@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
35645 struct section *sec_applies, *sec_symtab;
35646 char *sym_strtab;
35647 Elf_Sym *sh_symtab;
35648- int j;
35649+ unsigned int j;
35650 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35651 continue;
35652 }
35653@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35654 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35655 Elf_Sym *sym, const char *symname))
35656 {
35657- int i;
35658+ unsigned int i;
35659 /* Walk through the relocations */
35660 for (i = 0; i < ehdr.e_shnum; i++) {
35661 char *sym_strtab;
35662 Elf_Sym *sh_symtab;
35663 struct section *sec_applies, *sec_symtab;
35664- int j;
35665+ unsigned int j;
35666 struct section *sec = &secs[i];
35667
35668 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35669@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35670 {
35671 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35672 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35673+ char *sym_strtab = sec->link->link->strtab;
35674+
35675+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35676+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35677+ return 0;
35678+
35679+#ifdef CONFIG_PAX_KERNEXEC
35680+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35681+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35682+ return 0;
35683+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35684+ return 0;
35685+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35686+ return 0;
35687+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35688+ return 0;
35689+#endif
35690
35691 switch (r_type) {
35692 case R_386_NONE:
35693@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35694
35695 static void emit_relocs(int as_text, int use_real_mode)
35696 {
35697- int i;
35698+ unsigned int i;
35699 int (*write_reloc)(uint32_t, FILE *) = write32;
35700 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35701 const char *symname);
35702@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35703 {
35704 regex_init(use_real_mode);
35705 read_ehdr(fp);
35706+ read_phdrs(fp);
35707 read_shdrs(fp);
35708 read_strtabs(fp);
35709 read_symtabs(fp);
35710- read_relocs(fp);
35711+ read_relocs(fp, use_real_mode);
35712 if (ELF_BITS == 64)
35713 percpu_init();
35714 if (show_absolute_syms) {
35715diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35716index 80ffa5b..a33bd15 100644
35717--- a/arch/x86/um/tls_32.c
35718+++ b/arch/x86/um/tls_32.c
35719@@ -260,7 +260,7 @@ out:
35720 if (unlikely(task == current &&
35721 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35722 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35723- "without flushed TLS.", current->pid);
35724+ "without flushed TLS.", task_pid_nr(current));
35725 }
35726
35727 return 0;
35728diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35729index fd14be1..e3c79c0 100644
35730--- a/arch/x86/vdso/Makefile
35731+++ b/arch/x86/vdso/Makefile
35732@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
35733 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35734 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35735
35736-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35737+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35738 GCOV_PROFILE := n
35739
35740 #
35741diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35742index f1d633a..a75c5f7 100644
35743--- a/arch/x86/vdso/vdso32-setup.c
35744+++ b/arch/x86/vdso/vdso32-setup.c
35745@@ -25,6 +25,7 @@
35746 #include <asm/tlbflush.h>
35747 #include <asm/vdso.h>
35748 #include <asm/proto.h>
35749+#include <asm/mman.h>
35750
35751 enum {
35752 VDSO_DISABLED = 0,
35753@@ -227,7 +228,7 @@ static inline void map_compat_vdso(int map)
35754 void enable_sep_cpu(void)
35755 {
35756 int cpu = get_cpu();
35757- struct tss_struct *tss = &per_cpu(init_tss, cpu);
35758+ struct tss_struct *tss = init_tss + cpu;
35759
35760 if (!boot_cpu_has(X86_FEATURE_SEP)) {
35761 put_cpu();
35762@@ -250,7 +251,7 @@ static int __init gate_vma_init(void)
35763 gate_vma.vm_start = FIXADDR_USER_START;
35764 gate_vma.vm_end = FIXADDR_USER_END;
35765 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35766- gate_vma.vm_page_prot = __P101;
35767+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35768
35769 return 0;
35770 }
35771@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35772 if (compat)
35773 addr = VDSO_HIGH_BASE;
35774 else {
35775- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
35776+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
35777 if (IS_ERR_VALUE(addr)) {
35778 ret = addr;
35779 goto up_fail;
35780 }
35781 }
35782
35783- current->mm->context.vdso = (void *)addr;
35784+ current->mm->context.vdso = addr;
35785
35786 if (compat_uses_vma || !compat) {
35787 /*
35788@@ -354,11 +355,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35789 }
35790
35791 current_thread_info()->sysenter_return =
35792- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35793+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35794
35795 up_fail:
35796 if (ret)
35797- current->mm->context.vdso = NULL;
35798+ current->mm->context.vdso = 0;
35799
35800 up_write(&mm->mmap_sem);
35801
35802@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
35803
35804 const char *arch_vma_name(struct vm_area_struct *vma)
35805 {
35806- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
35807+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
35808 return "[vdso]";
35809+
35810+#ifdef CONFIG_PAX_SEGMEXEC
35811+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
35812+ return "[vdso]";
35813+#endif
35814+
35815 return NULL;
35816 }
35817
35818@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
35819 * Check to see if the corresponding task was created in compat vdso
35820 * mode.
35821 */
35822- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
35823+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
35824 return &gate_vma;
35825 return NULL;
35826 }
35827diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35828index 431e875..cbb23f3 100644
35829--- a/arch/x86/vdso/vma.c
35830+++ b/arch/x86/vdso/vma.c
35831@@ -16,8 +16,6 @@
35832 #include <asm/vdso.h>
35833 #include <asm/page.h>
35834
35835-unsigned int __read_mostly vdso_enabled = 1;
35836-
35837 extern char vdso_start[], vdso_end[];
35838 extern unsigned short vdso_sync_cpuid;
35839
35840@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
35841 * unaligned here as a result of stack start randomization.
35842 */
35843 addr = PAGE_ALIGN(addr);
35844- addr = align_vdso_addr(addr);
35845
35846 return addr;
35847 }
35848@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
35849 unsigned size)
35850 {
35851 struct mm_struct *mm = current->mm;
35852- unsigned long addr;
35853+ unsigned long addr = 0;
35854 int ret;
35855
35856- if (!vdso_enabled)
35857- return 0;
35858-
35859 down_write(&mm->mmap_sem);
35860+
35861+#ifdef CONFIG_PAX_RANDMMAP
35862+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
35863+#endif
35864+
35865 addr = vdso_addr(mm->start_stack, size);
35866+ addr = align_vdso_addr(addr);
35867 addr = get_unmapped_area(NULL, addr, size, 0, 0);
35868 if (IS_ERR_VALUE(addr)) {
35869 ret = addr;
35870 goto up_fail;
35871 }
35872
35873- current->mm->context.vdso = (void *)addr;
35874+ mm->context.vdso = addr;
35875
35876 ret = install_special_mapping(mm, addr, size,
35877 VM_READ|VM_EXEC|
35878 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
35879 pages);
35880- if (ret) {
35881- current->mm->context.vdso = NULL;
35882- goto up_fail;
35883- }
35884+ if (ret)
35885+ mm->context.vdso = 0;
35886
35887 up_fail:
35888 up_write(&mm->mmap_sem);
35889@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35890 vdsox32_size);
35891 }
35892 #endif
35893-
35894-static __init int vdso_setup(char *s)
35895-{
35896- vdso_enabled = simple_strtoul(s, NULL, 0);
35897- return 0;
35898-}
35899-__setup("vdso=", vdso_setup);
35900diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35901index 01b9026..1e476df 100644
35902--- a/arch/x86/xen/Kconfig
35903+++ b/arch/x86/xen/Kconfig
35904@@ -9,6 +9,7 @@ config XEN
35905 select XEN_HAVE_PVMMU
35906 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
35907 depends on X86_TSC
35908+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35909 help
35910 This is the Linux Xen port. Enabling this will allow the
35911 kernel to boot in a paravirtualized environment under the
35912diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35913index 201d09a..e4723e5 100644
35914--- a/arch/x86/xen/enlighten.c
35915+++ b/arch/x86/xen/enlighten.c
35916@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35917
35918 struct shared_info xen_dummy_shared_info;
35919
35920-void *xen_initial_gdt;
35921-
35922 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35923 __read_mostly int xen_have_vector_callback;
35924 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35925@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35926 {
35927 unsigned long va = dtr->address;
35928 unsigned int size = dtr->size + 1;
35929- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35930- unsigned long frames[pages];
35931+ unsigned long frames[65536 / PAGE_SIZE];
35932 int f;
35933
35934 /*
35935@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35936 {
35937 unsigned long va = dtr->address;
35938 unsigned int size = dtr->size + 1;
35939- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35940- unsigned long frames[pages];
35941+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35942 int f;
35943
35944 /*
35945@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35946 * 8-byte entries, or 16 4k pages..
35947 */
35948
35949- BUG_ON(size > 65536);
35950+ BUG_ON(size > GDT_SIZE);
35951 BUG_ON(va & ~PAGE_MASK);
35952
35953 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35954@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35955 return 0;
35956 }
35957
35958-static void set_xen_basic_apic_ops(void)
35959+static void __init set_xen_basic_apic_ops(void)
35960 {
35961 apic->read = xen_apic_read;
35962 apic->write = xen_apic_write;
35963@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35964 #endif
35965 };
35966
35967-static void xen_reboot(int reason)
35968+static __noreturn void xen_reboot(int reason)
35969 {
35970 struct sched_shutdown r = { .reason = reason };
35971
35972- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35973- BUG();
35974+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35975+ BUG();
35976 }
35977
35978-static void xen_restart(char *msg)
35979+static __noreturn void xen_restart(char *msg)
35980 {
35981 xen_reboot(SHUTDOWN_reboot);
35982 }
35983
35984-static void xen_emergency_restart(void)
35985+static __noreturn void xen_emergency_restart(void)
35986 {
35987 xen_reboot(SHUTDOWN_reboot);
35988 }
35989
35990-static void xen_machine_halt(void)
35991+static __noreturn void xen_machine_halt(void)
35992 {
35993 xen_reboot(SHUTDOWN_poweroff);
35994 }
35995
35996-static void xen_machine_power_off(void)
35997+static __noreturn void xen_machine_power_off(void)
35998 {
35999 if (pm_power_off)
36000 pm_power_off();
36001@@ -1564,7 +1560,17 @@ asmlinkage void __init xen_start_kernel(void)
36002 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
36003
36004 /* Work out if we support NX */
36005- x86_configure_nx();
36006+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
36007+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
36008+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
36009+ unsigned l, h;
36010+
36011+ __supported_pte_mask |= _PAGE_NX;
36012+ rdmsr(MSR_EFER, l, h);
36013+ l |= EFER_NX;
36014+ wrmsr(MSR_EFER, l, h);
36015+ }
36016+#endif
36017
36018 /* Get mfn list */
36019 xen_build_dynamic_phys_to_machine();
36020@@ -1592,13 +1598,6 @@ asmlinkage void __init xen_start_kernel(void)
36021
36022 machine_ops = xen_machine_ops;
36023
36024- /*
36025- * The only reliable way to retain the initial address of the
36026- * percpu gdt_page is to remember it here, so we can go and
36027- * mark it RW later, when the initial percpu area is freed.
36028- */
36029- xen_initial_gdt = &per_cpu(gdt_page, 0);
36030-
36031 xen_smp_init();
36032
36033 #ifdef CONFIG_ACPI_NUMA
36034diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
36035index 2423ef0..4f6fb5b 100644
36036--- a/arch/x86/xen/mmu.c
36037+++ b/arch/x86/xen/mmu.c
36038@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
36039 return val;
36040 }
36041
36042-static pteval_t pte_pfn_to_mfn(pteval_t val)
36043+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
36044 {
36045 if (val & _PAGE_PRESENT) {
36046 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
36047@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36048 /* L3_k[510] -> level2_kernel_pgt
36049 * L3_i[511] -> level2_fixmap_pgt */
36050 convert_pfn_mfn(level3_kernel_pgt);
36051+ convert_pfn_mfn(level3_vmalloc_start_pgt);
36052+ convert_pfn_mfn(level3_vmalloc_end_pgt);
36053+ convert_pfn_mfn(level3_vmemmap_pgt);
36054 }
36055 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
36056 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
36057@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36058 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
36059 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
36060 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
36061+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
36062+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
36063+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
36064 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
36065 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
36066+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
36067 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
36068 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
36069
36070@@ -2123,6 +2130,7 @@ static void __init xen_post_allocator_init(void)
36071 pv_mmu_ops.set_pud = xen_set_pud;
36072 #if PAGETABLE_LEVELS == 4
36073 pv_mmu_ops.set_pgd = xen_set_pgd;
36074+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
36075 #endif
36076
36077 /* This will work as long as patching hasn't happened yet
36078@@ -2201,6 +2209,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
36079 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
36080 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
36081 .set_pgd = xen_set_pgd_hyper,
36082+ .set_pgd_batched = xen_set_pgd_hyper,
36083
36084 .alloc_pud = xen_alloc_pmd_init,
36085 .release_pud = xen_release_pmd_init,
36086diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
36087index a18eadd..2e2f10e 100644
36088--- a/arch/x86/xen/smp.c
36089+++ b/arch/x86/xen/smp.c
36090@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
36091
36092 if (xen_pv_domain()) {
36093 if (!xen_feature(XENFEAT_writable_page_tables))
36094- /* We've switched to the "real" per-cpu gdt, so make
36095- * sure the old memory can be recycled. */
36096- make_lowmem_page_readwrite(xen_initial_gdt);
36097-
36098 #ifdef CONFIG_X86_32
36099 /*
36100 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
36101 * expects __USER_DS
36102 */
36103- loadsegment(ds, __USER_DS);
36104- loadsegment(es, __USER_DS);
36105+ loadsegment(ds, __KERNEL_DS);
36106+ loadsegment(es, __KERNEL_DS);
36107 #endif
36108
36109 xen_filter_cpu_maps();
36110@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36111 #ifdef CONFIG_X86_32
36112 /* Note: PVH is not yet supported on x86_32. */
36113 ctxt->user_regs.fs = __KERNEL_PERCPU;
36114- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36115+ savesegment(gs, ctxt->user_regs.gs);
36116 #endif
36117 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36118
36119@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36120 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
36121 ctxt->flags = VGCF_IN_KERNEL;
36122 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36123- ctxt->user_regs.ds = __USER_DS;
36124- ctxt->user_regs.es = __USER_DS;
36125+ ctxt->user_regs.ds = __KERNEL_DS;
36126+ ctxt->user_regs.es = __KERNEL_DS;
36127 ctxt->user_regs.ss = __KERNEL_DS;
36128
36129 xen_copy_trap_info(ctxt->trap_ctxt);
36130@@ -437,13 +433,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36131 int rc;
36132
36133 per_cpu(current_task, cpu) = idle;
36134+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36135 #ifdef CONFIG_X86_32
36136 irq_ctx_init(cpu);
36137 #else
36138 clear_tsk_thread_flag(idle, TIF_FORK);
36139- per_cpu(kernel_stack, cpu) =
36140- (unsigned long)task_stack_page(idle) -
36141- KERNEL_STACK_OFFSET + THREAD_SIZE;
36142+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36143 #endif
36144 xen_setup_runstate_info(cpu);
36145 xen_setup_timer(cpu);
36146@@ -719,7 +714,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36147
36148 void __init xen_smp_init(void)
36149 {
36150- smp_ops = xen_smp_ops;
36151+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36152 xen_fill_possible_map();
36153 }
36154
36155diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36156index 33ca6e4..0ded929 100644
36157--- a/arch/x86/xen/xen-asm_32.S
36158+++ b/arch/x86/xen/xen-asm_32.S
36159@@ -84,14 +84,14 @@ ENTRY(xen_iret)
36160 ESP_OFFSET=4 # bytes pushed onto stack
36161
36162 /*
36163- * Store vcpu_info pointer for easy access. Do it this way to
36164- * avoid having to reload %fs
36165+ * Store vcpu_info pointer for easy access.
36166 */
36167 #ifdef CONFIG_SMP
36168- GET_THREAD_INFO(%eax)
36169- movl %ss:TI_cpu(%eax), %eax
36170- movl %ss:__per_cpu_offset(,%eax,4), %eax
36171- mov %ss:xen_vcpu(%eax), %eax
36172+ push %fs
36173+ mov $(__KERNEL_PERCPU), %eax
36174+ mov %eax, %fs
36175+ mov PER_CPU_VAR(xen_vcpu), %eax
36176+ pop %fs
36177 #else
36178 movl %ss:xen_vcpu, %eax
36179 #endif
36180diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36181index 485b695..fda3e7c 100644
36182--- a/arch/x86/xen/xen-head.S
36183+++ b/arch/x86/xen/xen-head.S
36184@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36185 #ifdef CONFIG_X86_32
36186 mov %esi,xen_start_info
36187 mov $init_thread_union+THREAD_SIZE,%esp
36188+#ifdef CONFIG_SMP
36189+ movl $cpu_gdt_table,%edi
36190+ movl $__per_cpu_load,%eax
36191+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36192+ rorl $16,%eax
36193+ movb %al,__KERNEL_PERCPU + 4(%edi)
36194+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36195+ movl $__per_cpu_end - 1,%eax
36196+ subl $__per_cpu_start,%eax
36197+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36198+#endif
36199 #else
36200 mov %rsi,xen_start_info
36201 mov $init_thread_union+THREAD_SIZE,%rsp
36202diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36203index 1cb6f4c..9981524 100644
36204--- a/arch/x86/xen/xen-ops.h
36205+++ b/arch/x86/xen/xen-ops.h
36206@@ -10,8 +10,6 @@
36207 extern const char xen_hypervisor_callback[];
36208 extern const char xen_failsafe_callback[];
36209
36210-extern void *xen_initial_gdt;
36211-
36212 struct trap_info;
36213 void xen_copy_trap_info(struct trap_info *traps);
36214
36215diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36216index 525bd3d..ef888b1 100644
36217--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36218+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36219@@ -119,9 +119,9 @@
36220 ----------------------------------------------------------------------*/
36221
36222 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36223-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36224 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36225 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36226+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36227
36228 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36229 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36230diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36231index 2f33760..835e50a 100644
36232--- a/arch/xtensa/variants/fsf/include/variant/core.h
36233+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36234@@ -11,6 +11,7 @@
36235 #ifndef _XTENSA_CORE_H
36236 #define _XTENSA_CORE_H
36237
36238+#include <linux/const.h>
36239
36240 /****************************************************************************
36241 Parameters Useful for Any Code, USER or PRIVILEGED
36242@@ -112,9 +113,9 @@
36243 ----------------------------------------------------------------------*/
36244
36245 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36246-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36247 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36248 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36249+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36250
36251 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36252 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36253diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
36254index af00795..2bb8105 100644
36255--- a/arch/xtensa/variants/s6000/include/variant/core.h
36256+++ b/arch/xtensa/variants/s6000/include/variant/core.h
36257@@ -11,6 +11,7 @@
36258 #ifndef _XTENSA_CORE_CONFIGURATION_H
36259 #define _XTENSA_CORE_CONFIGURATION_H
36260
36261+#include <linux/const.h>
36262
36263 /****************************************************************************
36264 Parameters Useful for Any Code, USER or PRIVILEGED
36265@@ -118,9 +119,9 @@
36266 ----------------------------------------------------------------------*/
36267
36268 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36269-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36270 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36271 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36272+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36273
36274 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
36275 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
36276diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
36277index 4e491d9..c8e18e4 100644
36278--- a/block/blk-cgroup.c
36279+++ b/block/blk-cgroup.c
36280@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
36281 static struct cgroup_subsys_state *
36282 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36283 {
36284- static atomic64_t id_seq = ATOMIC64_INIT(0);
36285+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
36286 struct blkcg *blkcg;
36287
36288 if (!parent_css) {
36289@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36290
36291 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
36292 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
36293- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
36294+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
36295 done:
36296 spin_lock_init(&blkcg->lock);
36297 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
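This blkcg id counter is the first of many conversions below (the ATM drivers in particular) from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, atomic_t arithmetic traps on overflow to stop reference-count wraps; counters that are mere statistics or id sources move to the unchecked type so they may wrap legitimately. A hedged sketch of the split, in simplified x86 form (the real PaX definitions carry overflow-recovery asm on the checked side):

    typedef struct {
            int counter;
    } atomic_unchecked_t;

    /* plain increment: no overflow trap, suitable for statistics */
    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            asm volatile("lock; incl %0" : "+m" (v->counter));
    }
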
36298diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36299index 1855bf5..af12b06 100644
36300--- a/block/blk-iopoll.c
36301+++ b/block/blk-iopoll.c
36302@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
36303 }
36304 EXPORT_SYMBOL(blk_iopoll_complete);
36305
36306-static void blk_iopoll_softirq(struct softirq_action *h)
36307+static __latent_entropy void blk_iopoll_softirq(void)
36308 {
36309 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36310 int rearm = 0, budget = blk_iopoll_budget;
36311diff --git a/block/blk-map.c b/block/blk-map.c
36312index ae4ae10..c470b8d 100644
36313--- a/block/blk-map.c
36314+++ b/block/blk-map.c
36315@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36316 if (!len || !kbuf)
36317 return -EINVAL;
36318
36319- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36320+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36321 if (do_copy)
36322 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36323 else
36324diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36325index 57790c1..5e988dd 100644
36326--- a/block/blk-softirq.c
36327+++ b/block/blk-softirq.c
36328@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36329 * Softirq action handler - move entries to local list and loop over them
36330 * while passing them to the queue registered handler.
36331 */
36332-static void blk_done_softirq(struct softirq_action *h)
36333+static __latent_entropy void blk_done_softirq(void)
36334 {
36335 struct list_head *cpu_list, local_list;
36336
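Both softirq handlers above gain __latent_entropy and drop the unused struct softirq_action argument, matching a tree-wide change to the softirq action prototype in this patch. A hedged sketch of how such a marker is typically guarded (assumption: mirrors the latent_entropy gcc-plugin convention, compiling away when the plugin is not active):

    #ifdef LATENT_ENTROPY_PLUGIN
    #define __latent_entropy __attribute__((latent_entropy))
    #else
    #define __latent_entropy
    #endif
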
36337diff --git a/block/bsg.c b/block/bsg.c
36338index 420a5a9..23834aa 100644
36339--- a/block/bsg.c
36340+++ b/block/bsg.c
36341@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36342 struct sg_io_v4 *hdr, struct bsg_device *bd,
36343 fmode_t has_write_perm)
36344 {
36345+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36346+ unsigned char *cmdptr;
36347+
36348 if (hdr->request_len > BLK_MAX_CDB) {
36349 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36350 if (!rq->cmd)
36351 return -ENOMEM;
36352- }
36353+ cmdptr = rq->cmd;
36354+ } else
36355+ cmdptr = tmpcmd;
36356
36357- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36358+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36359 hdr->request_len))
36360 return -EFAULT;
36361
36362+ if (cmdptr != rq->cmd)
36363+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36364+
36365 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36366 if (blk_verify_command(rq->cmd, has_write_perm))
36367 return -EPERM;
36368diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36369index fbd5a67..f24fd95 100644
36370--- a/block/compat_ioctl.c
36371+++ b/block/compat_ioctl.c
36372@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36373 cgc = compat_alloc_user_space(sizeof(*cgc));
36374 cgc32 = compat_ptr(arg);
36375
36376- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36377+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36378 get_user(data, &cgc32->buffer) ||
36379 put_user(compat_ptr(data), &cgc->buffer) ||
36380 copy_in_user(&cgc->buflen, &cgc32->buflen,
36381@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36382 err |= __get_user(f->spec1, &uf->spec1);
36383 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36384 err |= __get_user(name, &uf->name);
36385- f->name = compat_ptr(name);
36386+ f->name = (void __force_kernel *)compat_ptr(name);
36387 if (err) {
36388 err = -EFAULT;
36389 goto out;
36390diff --git a/block/genhd.c b/block/genhd.c
36391index 791f419..89f21c4 100644
36392--- a/block/genhd.c
36393+++ b/block/genhd.c
36394@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36395
36396 /*
36397 * Register device numbers dev..(dev+range-1)
36398- * range must be nonzero
36399+ * No-op if @range is zero.
36400 * The hash chain is sorted on range, so that subranges can override.
36401 */
36402 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36403 struct kobject *(*probe)(dev_t, int *, void *),
36404 int (*lock)(dev_t, void *), void *data)
36405 {
36406- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36407+ if (range)
36408+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36409 }
36410
36411 EXPORT_SYMBOL(blk_register_region);
36412
36413+/* undo blk_register_region(); no-op if @range is zero */
36414 void blk_unregister_region(dev_t devt, unsigned long range)
36415 {
36416- kobj_unmap(bdev_map, devt, range);
36417+ if (range)
36418+ kobj_unmap(bdev_map, devt, range);
36419 }
36420
36421 EXPORT_SYMBOL(blk_unregister_region);
36422diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36423index dc51f46..d5446a8 100644
36424--- a/block/partitions/efi.c
36425+++ b/block/partitions/efi.c
36426@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36427 if (!gpt)
36428 return NULL;
36429
36430+ if (!le32_to_cpu(gpt->num_partition_entries))
36431+ return NULL;
36432+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36433+ if (!pte)
36434+ return NULL;
36435+
36436 count = le32_to_cpu(gpt->num_partition_entries) *
36437 le32_to_cpu(gpt->sizeof_partition_entry);
36438- if (!count)
36439- return NULL;
36440- pte = kmalloc(count, GFP_KERNEL);
36441- if (!pte)
36442- return NULL;
36443-
36444 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36445 (u8 *) pte, count) < count) {
36446 kfree(pte);
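The hunk above replaces a kmalloc() of a 32-bit product with kcalloc(): num_partition_entries and sizeof_partition_entry both come from the on-disk GPT header, so their u32 multiplication can wrap (e.g. 0x10000 * 0x10000 wraps to 0 in 32-bit arithmetic), allocating a short buffer while later code still iterates the full num_partition_entries entries. kcalloc() multiplies in size_t and returns NULL for overflowing or absurdly large requests. A sketch of the safe shape under those assumptions, with a hypothetical helper name:

    #include <linux/slab.h>

    static void *alloc_gpt_entries(u32 nr, u32 entry_sz)
    {
            if (!nr)
                    return NULL;
            /* fails cleanly instead of wrapping nr * entry_sz */
            return kcalloc(nr, entry_sz, GFP_KERNEL);
    }
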
36447diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36448index 2648797..92ed21f 100644
36449--- a/block/scsi_ioctl.c
36450+++ b/block/scsi_ioctl.c
36451@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36452 return put_user(0, p);
36453 }
36454
36455-static int sg_get_timeout(struct request_queue *q)
36456+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36457 {
36458 return jiffies_to_clock_t(q->sg_timeout);
36459 }
36460@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
36461 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36462 struct sg_io_hdr *hdr, fmode_t mode)
36463 {
36464- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36465+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36466+ unsigned char *cmdptr;
36467+
36468+ if (rq->cmd != rq->__cmd)
36469+ cmdptr = rq->cmd;
36470+ else
36471+ cmdptr = tmpcmd;
36472+
36473+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36474 return -EFAULT;
36475+
36476+ if (cmdptr != rq->cmd)
36477+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36478+
36479 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36480 return -EPERM;
36481
36482@@ -417,6 +429,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36483 int err;
36484 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36485 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36486+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36487+ unsigned char *cmdptr;
36488
36489 if (!sic)
36490 return -EINVAL;
36491@@ -450,9 +464,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36492 */
36493 err = -EFAULT;
36494 rq->cmd_len = cmdlen;
36495- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36496+
36497+ if (rq->cmd != rq->__cmd)
36498+ cmdptr = rq->cmd;
36499+ else
36500+ cmdptr = tmpcmd;
36501+
36502+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36503 goto error;
36504
36505+ if (rq->cmd != cmdptr)
36506+ memcpy(rq->cmd, cmdptr, cmdlen);
36507+
36508 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36509 goto error;
36510
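blk_fill_sghdr_rq() and sg_scsi_ioctl() above (and blk_fill_sgv4_hdr_rq() in bsg.c earlier) all gain the same staging pattern: when rq->cmd still points at the inline rq->__cmd array, the copy_from_user() is aimed at an equally sized stack buffer first and the result memcpy()d into place. The likely rationale, hedged: grsecurity's usercopy checking refuses copies from userland straight into a buffer embedded inside a heap object, so bouncing through the stack keeps the usercopy target at a provably correct size. A condensed sketch with hypothetical names:

    #include <linux/blkdev.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int fill_cmd(struct request *rq, const void __user *ucmd,
                        unsigned int len)
    {
            unsigned char tmpcmd[sizeof(rq->__cmd)];
            unsigned char *cmdptr = (rq->cmd != rq->__cmd) ? rq->cmd : tmpcmd;

            if (copy_from_user(cmdptr, ucmd, len))
                    return -EFAULT;
            if (cmdptr != rq->cmd)          /* commit the staged copy */
                    memcpy(rq->cmd, cmdptr, len);
            return 0;
    }
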
36511diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36512index 7bdd61b..afec999 100644
36513--- a/crypto/cryptd.c
36514+++ b/crypto/cryptd.c
36515@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36516
36517 struct cryptd_blkcipher_request_ctx {
36518 crypto_completion_t complete;
36519-};
36520+} __no_const;
36521
36522 struct cryptd_hash_ctx {
36523 struct crypto_shash *child;
36524@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36525
36526 struct cryptd_aead_request_ctx {
36527 crypto_completion_t complete;
36528-};
36529+} __no_const;
36530
36531 static void cryptd_queue_worker(struct work_struct *work);
36532
36533diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36534index 309d345..1632720 100644
36535--- a/crypto/pcrypt.c
36536+++ b/crypto/pcrypt.c
36537@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36538 int ret;
36539
36540 pinst->kobj.kset = pcrypt_kset;
36541- ret = kobject_add(&pinst->kobj, NULL, name);
36542+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36543 if (!ret)
36544 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36545
36546diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36547index 15dddc1..b61cf0c 100644
36548--- a/drivers/acpi/acpica/hwxfsleep.c
36549+++ b/drivers/acpi/acpica/hwxfsleep.c
36550@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36551 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36552
36553 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36554- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36555- acpi_hw_extended_sleep},
36556- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36557- acpi_hw_extended_wake_prep},
36558- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36559+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36560+ .extended_function = acpi_hw_extended_sleep},
36561+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36562+ .extended_function = acpi_hw_extended_wake_prep},
36563+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36564+ .extended_function = acpi_hw_extended_wake}
36565 };
36566
36567 /*
36568diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36569index e5bcd91..74f050d 100644
36570--- a/drivers/acpi/apei/apei-internal.h
36571+++ b/drivers/acpi/apei/apei-internal.h
36572@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36573 struct apei_exec_ins_type {
36574 u32 flags;
36575 apei_exec_ins_func_t run;
36576-};
36577+} __do_const;
36578
36579 struct apei_exec_context {
36580 u32 ip;
36581diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36582index dab7cb7..f0d2994 100644
36583--- a/drivers/acpi/apei/ghes.c
36584+++ b/drivers/acpi/apei/ghes.c
36585@@ -500,7 +500,7 @@ static void __ghes_print_estatus(const char *pfx,
36586 const struct acpi_hest_generic *generic,
36587 const struct acpi_generic_status *estatus)
36588 {
36589- static atomic_t seqno;
36590+ static atomic_unchecked_t seqno;
36591 unsigned int curr_seqno;
36592 char pfx_seq[64];
36593
36594@@ -511,7 +511,7 @@ static void __ghes_print_estatus(const char *pfx,
36595 else
36596 pfx = KERN_ERR;
36597 }
36598- curr_seqno = atomic_inc_return(&seqno);
36599+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36600 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36601 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36602 pfx_seq, generic->header.source_id);
36603diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36604index a83e3c6..c3d617f 100644
36605--- a/drivers/acpi/bgrt.c
36606+++ b/drivers/acpi/bgrt.c
36607@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36608 if (!bgrt_image)
36609 return -ENODEV;
36610
36611- bin_attr_image.private = bgrt_image;
36612- bin_attr_image.size = bgrt_image_size;
36613+ pax_open_kernel();
36614+ *(void **)&bin_attr_image.private = bgrt_image;
36615+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36616+ pax_close_kernel();
36617
36618 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36619 if (!bgrt_kobj)
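The pax_open_kernel()/pax_close_kernel() pair here and in the libata hunks below brackets deliberate writes to data that KERNEXEC keeps read-only after init (the bin_attr fields here, ops->inherits and ap->ops->set_piomode later), which is also why the writes go through explicit casts. A hedged sketch of the x86 mechanism, assuming the simplified CR0.WP form (the real PaX code also handles preemption and related details):

    #include <asm/processor-flags.h>
    #include <asm/special_insns.h>

    static inline unsigned long pax_open_kernel(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 & ~X86_CR0_WP);   /* ring 0 may now write RO pages */
            return cr0;
    }

    static inline void pax_close_kernel(void)
    {
            write_cr0(read_cr0() | X86_CR0_WP);
    }
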
36620diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36621index 3d8413d..95f638c 100644
36622--- a/drivers/acpi/blacklist.c
36623+++ b/drivers/acpi/blacklist.c
36624@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36625 u32 is_critical_error;
36626 };
36627
36628-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36629+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36630
36631 /*
36632 * POLICY: If *anything* doesn't work, put it on the blacklist.
36633@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36634 return 0;
36635 }
36636
36637-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36638+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36639 {
36640 .callback = dmi_disable_osi_vista,
36641 .ident = "Fujitsu Siemens",
36642diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36643index c68e724..e863008 100644
36644--- a/drivers/acpi/custom_method.c
36645+++ b/drivers/acpi/custom_method.c
36646@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36647 struct acpi_table_header table;
36648 acpi_status status;
36649
36650+#ifdef CONFIG_GRKERNSEC_KMEM
36651+ return -EPERM;
36652+#endif
36653+
36654 if (!(*ppos)) {
36655 /* parse the table header to get the table length */
36656 if (count <= sizeof(struct acpi_table_header))
36657diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36658index 3dca36d..abaf070 100644
36659--- a/drivers/acpi/processor_idle.c
36660+++ b/drivers/acpi/processor_idle.c
36661@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36662 {
36663 int i, count = CPUIDLE_DRIVER_STATE_START;
36664 struct acpi_processor_cx *cx;
36665- struct cpuidle_state *state;
36666+ cpuidle_state_no_const *state;
36667 struct cpuidle_driver *drv = &acpi_idle_driver;
36668
36669 if (!pr->flags.power_setup_done)
36670diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36671index 91a32ce..d77fcaf 100644
36672--- a/drivers/acpi/sysfs.c
36673+++ b/drivers/acpi/sysfs.c
36674@@ -425,11 +425,11 @@ static u32 num_counters;
36675 static struct attribute **all_attrs;
36676 static u32 acpi_gpe_count;
36677
36678-static struct attribute_group interrupt_stats_attr_group = {
36679+static attribute_group_no_const interrupt_stats_attr_group = {
36680 .name = "interrupts",
36681 };
36682
36683-static struct kobj_attribute *counter_attrs;
36684+static kobj_attribute_no_const *counter_attrs;
36685
36686 static void delete_gpe_attr_array(void)
36687 {
36688diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36689index 36605ab..6ef6d4b 100644
36690--- a/drivers/ata/libahci.c
36691+++ b/drivers/ata/libahci.c
36692@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
36693 }
36694 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36695
36696-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36697+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36698 struct ata_taskfile *tf, int is_cmd, u16 flags,
36699 unsigned long timeout_msec)
36700 {
36701diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36702index bb26636..09cbdb4 100644
36703--- a/drivers/ata/libata-core.c
36704+++ b/drivers/ata/libata-core.c
36705@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36706 static void ata_dev_xfermask(struct ata_device *dev);
36707 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36708
36709-atomic_t ata_print_id = ATOMIC_INIT(0);
36710+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36711
36712 struct ata_force_param {
36713 const char *name;
36714@@ -4858,7 +4858,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36715 struct ata_port *ap;
36716 unsigned int tag;
36717
36718- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36719+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36720 ap = qc->ap;
36721
36722 qc->flags = 0;
36723@@ -4874,7 +4874,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36724 struct ata_port *ap;
36725 struct ata_link *link;
36726
36727- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36728+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36729 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36730 ap = qc->ap;
36731 link = qc->dev->link;
36732@@ -5993,6 +5993,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36733 return;
36734
36735 spin_lock(&lock);
36736+ pax_open_kernel();
36737
36738 for (cur = ops->inherits; cur; cur = cur->inherits) {
36739 void **inherit = (void **)cur;
36740@@ -6006,8 +6007,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36741 if (IS_ERR(*pp))
36742 *pp = NULL;
36743
36744- ops->inherits = NULL;
36745+ *(struct ata_port_operations **)&ops->inherits = NULL;
36746
36747+ pax_close_kernel();
36748 spin_unlock(&lock);
36749 }
36750
36751@@ -6200,7 +6202,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36752
36753 /* give ports names and add SCSI hosts */
36754 for (i = 0; i < host->n_ports; i++) {
36755- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36756+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36757 host->ports[i]->local_port_no = i + 1;
36758 }
36759
36760diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36761index ef8567d..8bdbd03 100644
36762--- a/drivers/ata/libata-scsi.c
36763+++ b/drivers/ata/libata-scsi.c
36764@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap)
36765
36766 if (rc)
36767 return rc;
36768- ap->print_id = atomic_inc_return(&ata_print_id);
36769+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36770 return 0;
36771 }
36772 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36773diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36774index 45b5ab3..98446b8 100644
36775--- a/drivers/ata/libata.h
36776+++ b/drivers/ata/libata.h
36777@@ -53,7 +53,7 @@ enum {
36778 ATA_DNXFER_QUIET = (1 << 31),
36779 };
36780
36781-extern atomic_t ata_print_id;
36782+extern atomic_unchecked_t ata_print_id;
36783 extern int atapi_passthru16;
36784 extern int libata_fua;
36785 extern int libata_noacpi;
36786diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36787index 73492dd..ca2bff5 100644
36788--- a/drivers/ata/pata_arasan_cf.c
36789+++ b/drivers/ata/pata_arasan_cf.c
36790@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36791 /* Handle platform specific quirks */
36792 if (quirk) {
36793 if (quirk & CF_BROKEN_PIO) {
36794- ap->ops->set_piomode = NULL;
36795+ pax_open_kernel();
36796+ *(void **)&ap->ops->set_piomode = NULL;
36797+ pax_close_kernel();
36798 ap->pio_mask = 0;
36799 }
36800 if (quirk & CF_BROKEN_MWDMA)
36801diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36802index f9b983a..887b9d8 100644
36803--- a/drivers/atm/adummy.c
36804+++ b/drivers/atm/adummy.c
36805@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36806 vcc->pop(vcc, skb);
36807 else
36808 dev_kfree_skb_any(skb);
36809- atomic_inc(&vcc->stats->tx);
36810+ atomic_inc_unchecked(&vcc->stats->tx);
36811
36812 return 0;
36813 }
36814diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36815index 62a7607..cc4be104 100644
36816--- a/drivers/atm/ambassador.c
36817+++ b/drivers/atm/ambassador.c
36818@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36819 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36820
36821 // VC layer stats
36822- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36823+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36824
36825 // free the descriptor
36826 kfree (tx_descr);
36827@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36828 dump_skb ("<<<", vc, skb);
36829
36830 // VC layer stats
36831- atomic_inc(&atm_vcc->stats->rx);
36832+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36833 __net_timestamp(skb);
36834 // end of our responsibility
36835 atm_vcc->push (atm_vcc, skb);
36836@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36837 } else {
36838 PRINTK (KERN_INFO, "dropped over-size frame");
36839 // should we count this?
36840- atomic_inc(&atm_vcc->stats->rx_drop);
36841+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36842 }
36843
36844 } else {
36845@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36846 }
36847
36848 if (check_area (skb->data, skb->len)) {
36849- atomic_inc(&atm_vcc->stats->tx_err);
36850+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36851 return -ENOMEM; // ?
36852 }
36853
36854diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36855index 0e3f8f9..765a7a5 100644
36856--- a/drivers/atm/atmtcp.c
36857+++ b/drivers/atm/atmtcp.c
36858@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36859 if (vcc->pop) vcc->pop(vcc,skb);
36860 else dev_kfree_skb(skb);
36861 if (dev_data) return 0;
36862- atomic_inc(&vcc->stats->tx_err);
36863+ atomic_inc_unchecked(&vcc->stats->tx_err);
36864 return -ENOLINK;
36865 }
36866 size = skb->len+sizeof(struct atmtcp_hdr);
36867@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36868 if (!new_skb) {
36869 if (vcc->pop) vcc->pop(vcc,skb);
36870 else dev_kfree_skb(skb);
36871- atomic_inc(&vcc->stats->tx_err);
36872+ atomic_inc_unchecked(&vcc->stats->tx_err);
36873 return -ENOBUFS;
36874 }
36875 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36876@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36877 if (vcc->pop) vcc->pop(vcc,skb);
36878 else dev_kfree_skb(skb);
36879 out_vcc->push(out_vcc,new_skb);
36880- atomic_inc(&vcc->stats->tx);
36881- atomic_inc(&out_vcc->stats->rx);
36882+ atomic_inc_unchecked(&vcc->stats->tx);
36883+ atomic_inc_unchecked(&out_vcc->stats->rx);
36884 return 0;
36885 }
36886
36887@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36888 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
36889 read_unlock(&vcc_sklist_lock);
36890 if (!out_vcc) {
36891- atomic_inc(&vcc->stats->tx_err);
36892+ atomic_inc_unchecked(&vcc->stats->tx_err);
36893 goto done;
36894 }
36895 skb_pull(skb,sizeof(struct atmtcp_hdr));
36896@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36897 __net_timestamp(new_skb);
36898 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36899 out_vcc->push(out_vcc,new_skb);
36900- atomic_inc(&vcc->stats->tx);
36901- atomic_inc(&out_vcc->stats->rx);
36902+ atomic_inc_unchecked(&vcc->stats->tx);
36903+ atomic_inc_unchecked(&out_vcc->stats->rx);
36904 done:
36905 if (vcc->pop) vcc->pop(vcc,skb);
36906 else dev_kfree_skb(skb);
36907diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36908index b1955ba..b179940 100644
36909--- a/drivers/atm/eni.c
36910+++ b/drivers/atm/eni.c
36911@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36912 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36913 vcc->dev->number);
36914 length = 0;
36915- atomic_inc(&vcc->stats->rx_err);
36916+ atomic_inc_unchecked(&vcc->stats->rx_err);
36917 }
36918 else {
36919 length = ATM_CELL_SIZE-1; /* no HEC */
36920@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36921 size);
36922 }
36923 eff = length = 0;
36924- atomic_inc(&vcc->stats->rx_err);
36925+ atomic_inc_unchecked(&vcc->stats->rx_err);
36926 }
36927 else {
36928 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36929@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36930 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36931 vcc->dev->number,vcc->vci,length,size << 2,descr);
36932 length = eff = 0;
36933- atomic_inc(&vcc->stats->rx_err);
36934+ atomic_inc_unchecked(&vcc->stats->rx_err);
36935 }
36936 }
36937 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36938@@ -767,7 +767,7 @@ rx_dequeued++;
36939 vcc->push(vcc,skb);
36940 pushed++;
36941 }
36942- atomic_inc(&vcc->stats->rx);
36943+ atomic_inc_unchecked(&vcc->stats->rx);
36944 }
36945 wake_up(&eni_dev->rx_wait);
36946 }
36947@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
36948 PCI_DMA_TODEVICE);
36949 if (vcc->pop) vcc->pop(vcc,skb);
36950 else dev_kfree_skb_irq(skb);
36951- atomic_inc(&vcc->stats->tx);
36952+ atomic_inc_unchecked(&vcc->stats->tx);
36953 wake_up(&eni_dev->tx_wait);
36954 dma_complete++;
36955 }
36956diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36957index b41c948..a002b17 100644
36958--- a/drivers/atm/firestream.c
36959+++ b/drivers/atm/firestream.c
36960@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36961 }
36962 }
36963
36964- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36965+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36966
36967 fs_dprintk (FS_DEBUG_TXMEM, "i");
36968 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36969@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36970 #endif
36971 skb_put (skb, qe->p1 & 0xffff);
36972 ATM_SKB(skb)->vcc = atm_vcc;
36973- atomic_inc(&atm_vcc->stats->rx);
36974+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36975 __net_timestamp(skb);
36976 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36977 atm_vcc->push (atm_vcc, skb);
36978@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36979 kfree (pe);
36980 }
36981 if (atm_vcc)
36982- atomic_inc(&atm_vcc->stats->rx_drop);
36983+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36984 break;
36985 case 0x1f: /* Reassembly abort: no buffers. */
36986 /* Silently increment error counter. */
36987 if (atm_vcc)
36988- atomic_inc(&atm_vcc->stats->rx_drop);
36989+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36990 break;
36991 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36992 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36993diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36994index 204814e..cede831 100644
36995--- a/drivers/atm/fore200e.c
36996+++ b/drivers/atm/fore200e.c
36997@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36998 #endif
36999 /* check error condition */
37000 if (*entry->status & STATUS_ERROR)
37001- atomic_inc(&vcc->stats->tx_err);
37002+ atomic_inc_unchecked(&vcc->stats->tx_err);
37003 else
37004- atomic_inc(&vcc->stats->tx);
37005+ atomic_inc_unchecked(&vcc->stats->tx);
37006 }
37007 }
37008
37009@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37010 if (skb == NULL) {
37011 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37012
37013- atomic_inc(&vcc->stats->rx_drop);
37014+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37015 return -ENOMEM;
37016 }
37017
37018@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37019
37020 dev_kfree_skb_any(skb);
37021
37022- atomic_inc(&vcc->stats->rx_drop);
37023+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37024 return -ENOMEM;
37025 }
37026
37027 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37028
37029 vcc->push(vcc, skb);
37030- atomic_inc(&vcc->stats->rx);
37031+ atomic_inc_unchecked(&vcc->stats->rx);
37032
37033 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37034
37035@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37036 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37037 fore200e->atm_dev->number,
37038 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37039- atomic_inc(&vcc->stats->rx_err);
37040+ atomic_inc_unchecked(&vcc->stats->rx_err);
37041 }
37042 }
37043
37044@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37045 goto retry_here;
37046 }
37047
37048- atomic_inc(&vcc->stats->tx_err);
37049+ atomic_inc_unchecked(&vcc->stats->tx_err);
37050
37051 fore200e->tx_sat++;
37052 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37053diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37054index aa6be26..f70a785 100644
37055--- a/drivers/atm/he.c
37056+++ b/drivers/atm/he.c
37057@@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37058
37059 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37060 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37061- atomic_inc(&vcc->stats->rx_drop);
37062+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37063 goto return_host_buffers;
37064 }
37065
37066@@ -1717,7 +1717,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37067 RBRQ_LEN_ERR(he_dev->rbrq_head)
37068 ? "LEN_ERR" : "",
37069 vcc->vpi, vcc->vci);
37070- atomic_inc(&vcc->stats->rx_err);
37071+ atomic_inc_unchecked(&vcc->stats->rx_err);
37072 goto return_host_buffers;
37073 }
37074
37075@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37076 vcc->push(vcc, skb);
37077 spin_lock(&he_dev->global_lock);
37078
37079- atomic_inc(&vcc->stats->rx);
37080+ atomic_inc_unchecked(&vcc->stats->rx);
37081
37082 return_host_buffers:
37083 ++pdus_assembled;
37084@@ -2095,7 +2095,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37085 tpd->vcc->pop(tpd->vcc, tpd->skb);
37086 else
37087 dev_kfree_skb_any(tpd->skb);
37088- atomic_inc(&tpd->vcc->stats->tx_err);
37089+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37090 }
37091 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37092 return;
37093@@ -2507,7 +2507,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37094 vcc->pop(vcc, skb);
37095 else
37096 dev_kfree_skb_any(skb);
37097- atomic_inc(&vcc->stats->tx_err);
37098+ atomic_inc_unchecked(&vcc->stats->tx_err);
37099 return -EINVAL;
37100 }
37101
37102@@ -2518,7 +2518,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37103 vcc->pop(vcc, skb);
37104 else
37105 dev_kfree_skb_any(skb);
37106- atomic_inc(&vcc->stats->tx_err);
37107+ atomic_inc_unchecked(&vcc->stats->tx_err);
37108 return -EINVAL;
37109 }
37110 #endif
37111@@ -2530,7 +2530,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37112 vcc->pop(vcc, skb);
37113 else
37114 dev_kfree_skb_any(skb);
37115- atomic_inc(&vcc->stats->tx_err);
37116+ atomic_inc_unchecked(&vcc->stats->tx_err);
37117 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37118 return -ENOMEM;
37119 }
37120@@ -2572,7 +2572,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37121 vcc->pop(vcc, skb);
37122 else
37123 dev_kfree_skb_any(skb);
37124- atomic_inc(&vcc->stats->tx_err);
37125+ atomic_inc_unchecked(&vcc->stats->tx_err);
37126 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37127 return -ENOMEM;
37128 }
37129@@ -2603,7 +2603,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37130 __enqueue_tpd(he_dev, tpd, cid);
37131 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37132
37133- atomic_inc(&vcc->stats->tx);
37134+ atomic_inc_unchecked(&vcc->stats->tx);
37135
37136 return 0;
37137 }
37138diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37139index 1dc0519..1aadaf7 100644
37140--- a/drivers/atm/horizon.c
37141+++ b/drivers/atm/horizon.c
37142@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37143 {
37144 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37145 // VC layer stats
37146- atomic_inc(&vcc->stats->rx);
37147+ atomic_inc_unchecked(&vcc->stats->rx);
37148 __net_timestamp(skb);
37149 // end of our responsibility
37150 vcc->push (vcc, skb);
37151@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37152 dev->tx_iovec = NULL;
37153
37154 // VC layer stats
37155- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37156+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37157
37158 // free the skb
37159 hrz_kfree_skb (skb);
37160diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37161index 1bdf104..9dc44b1 100644
37162--- a/drivers/atm/idt77252.c
37163+++ b/drivers/atm/idt77252.c
37164@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37165 else
37166 dev_kfree_skb(skb);
37167
37168- atomic_inc(&vcc->stats->tx);
37169+ atomic_inc_unchecked(&vcc->stats->tx);
37170 }
37171
37172 atomic_dec(&scq->used);
37173@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37174 if ((sb = dev_alloc_skb(64)) == NULL) {
37175 printk("%s: Can't allocate buffers for aal0.\n",
37176 card->name);
37177- atomic_add(i, &vcc->stats->rx_drop);
37178+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37179 break;
37180 }
37181 if (!atm_charge(vcc, sb->truesize)) {
37182 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37183 card->name);
37184- atomic_add(i - 1, &vcc->stats->rx_drop);
37185+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37186 dev_kfree_skb(sb);
37187 break;
37188 }
37189@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37190 ATM_SKB(sb)->vcc = vcc;
37191 __net_timestamp(sb);
37192 vcc->push(vcc, sb);
37193- atomic_inc(&vcc->stats->rx);
37194+ atomic_inc_unchecked(&vcc->stats->rx);
37195
37196 cell += ATM_CELL_PAYLOAD;
37197 }
37198@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37199 "(CDC: %08x)\n",
37200 card->name, len, rpp->len, readl(SAR_REG_CDC));
37201 recycle_rx_pool_skb(card, rpp);
37202- atomic_inc(&vcc->stats->rx_err);
37203+ atomic_inc_unchecked(&vcc->stats->rx_err);
37204 return;
37205 }
37206 if (stat & SAR_RSQE_CRC) {
37207 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37208 recycle_rx_pool_skb(card, rpp);
37209- atomic_inc(&vcc->stats->rx_err);
37210+ atomic_inc_unchecked(&vcc->stats->rx_err);
37211 return;
37212 }
37213 if (skb_queue_len(&rpp->queue) > 1) {
37214@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37215 RXPRINTK("%s: Can't alloc RX skb.\n",
37216 card->name);
37217 recycle_rx_pool_skb(card, rpp);
37218- atomic_inc(&vcc->stats->rx_err);
37219+ atomic_inc_unchecked(&vcc->stats->rx_err);
37220 return;
37221 }
37222 if (!atm_charge(vcc, skb->truesize)) {
37223@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37224 __net_timestamp(skb);
37225
37226 vcc->push(vcc, skb);
37227- atomic_inc(&vcc->stats->rx);
37228+ atomic_inc_unchecked(&vcc->stats->rx);
37229
37230 return;
37231 }
37232@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37233 __net_timestamp(skb);
37234
37235 vcc->push(vcc, skb);
37236- atomic_inc(&vcc->stats->rx);
37237+ atomic_inc_unchecked(&vcc->stats->rx);
37238
37239 if (skb->truesize > SAR_FB_SIZE_3)
37240 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37241@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37242 if (vcc->qos.aal != ATM_AAL0) {
37243 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37244 card->name, vpi, vci);
37245- atomic_inc(&vcc->stats->rx_drop);
37246+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37247 goto drop;
37248 }
37249
37250 if ((sb = dev_alloc_skb(64)) == NULL) {
37251 printk("%s: Can't allocate buffers for AAL0.\n",
37252 card->name);
37253- atomic_inc(&vcc->stats->rx_err);
37254+ atomic_inc_unchecked(&vcc->stats->rx_err);
37255 goto drop;
37256 }
37257
37258@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37259 ATM_SKB(sb)->vcc = vcc;
37260 __net_timestamp(sb);
37261 vcc->push(vcc, sb);
37262- atomic_inc(&vcc->stats->rx);
37263+ atomic_inc_unchecked(&vcc->stats->rx);
37264
37265 drop:
37266 skb_pull(queue, 64);
37267@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37268
37269 if (vc == NULL) {
37270 printk("%s: NULL connection in send().\n", card->name);
37271- atomic_inc(&vcc->stats->tx_err);
37272+ atomic_inc_unchecked(&vcc->stats->tx_err);
37273 dev_kfree_skb(skb);
37274 return -EINVAL;
37275 }
37276 if (!test_bit(VCF_TX, &vc->flags)) {
37277 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37278- atomic_inc(&vcc->stats->tx_err);
37279+ atomic_inc_unchecked(&vcc->stats->tx_err);
37280 dev_kfree_skb(skb);
37281 return -EINVAL;
37282 }
37283@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37284 break;
37285 default:
37286 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37287- atomic_inc(&vcc->stats->tx_err);
37288+ atomic_inc_unchecked(&vcc->stats->tx_err);
37289 dev_kfree_skb(skb);
37290 return -EINVAL;
37291 }
37292
37293 if (skb_shinfo(skb)->nr_frags != 0) {
37294 printk("%s: No scatter-gather yet.\n", card->name);
37295- atomic_inc(&vcc->stats->tx_err);
37296+ atomic_inc_unchecked(&vcc->stats->tx_err);
37297 dev_kfree_skb(skb);
37298 return -EINVAL;
37299 }
37300@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37301
37302 err = queue_skb(card, vc, skb, oam);
37303 if (err) {
37304- atomic_inc(&vcc->stats->tx_err);
37305+ atomic_inc_unchecked(&vcc->stats->tx_err);
37306 dev_kfree_skb(skb);
37307 return err;
37308 }
37309@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37310 skb = dev_alloc_skb(64);
37311 if (!skb) {
37312 printk("%s: Out of memory in send_oam().\n", card->name);
37313- atomic_inc(&vcc->stats->tx_err);
37314+ atomic_inc_unchecked(&vcc->stats->tx_err);
37315 return -ENOMEM;
37316 }
37317 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
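Note on the atomic_*_unchecked conversions above, which repeat through the rest of these ATM drivers: under PaX's REFCOUNT hardening the plain atomic_t operations gain overflow detection, turning reference-count wraps into a trap instead of an exploitable use-after-free. Statistics counters are allowed to wrap, so the patch moves them to the unchecked variants. A minimal sketch of how the fallback definitions plausibly look when CONFIG_PAX_REFCOUNT is disabled (simplified; the real headers also cover atomic64_t and the asm fast paths):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_add_unchecked(i, v)		atomic_add((i), (v))
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif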
37318diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37319index 4217f29..88f547a 100644
37320--- a/drivers/atm/iphase.c
37321+++ b/drivers/atm/iphase.c
37322@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37323 status = (u_short) (buf_desc_ptr->desc_mode);
37324 if (status & (RX_CER | RX_PTE | RX_OFL))
37325 {
37326- atomic_inc(&vcc->stats->rx_err);
37327+ atomic_inc_unchecked(&vcc->stats->rx_err);
37328 IF_ERR(printk("IA: bad packet, dropping it");)
37329 if (status & RX_CER) {
37330 IF_ERR(printk(" cause: packet CRC error\n");)
37331@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37332 len = dma_addr - buf_addr;
37333 if (len > iadev->rx_buf_sz) {
37334 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37335- atomic_inc(&vcc->stats->rx_err);
37336+ atomic_inc_unchecked(&vcc->stats->rx_err);
37337 goto out_free_desc;
37338 }
37339
37340@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37341 ia_vcc = INPH_IA_VCC(vcc);
37342 if (ia_vcc == NULL)
37343 {
37344- atomic_inc(&vcc->stats->rx_err);
37345+ atomic_inc_unchecked(&vcc->stats->rx_err);
37346 atm_return(vcc, skb->truesize);
37347 dev_kfree_skb_any(skb);
37348 goto INCR_DLE;
37349@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37350 if ((length > iadev->rx_buf_sz) || (length >
37351 (skb->len - sizeof(struct cpcs_trailer))))
37352 {
37353- atomic_inc(&vcc->stats->rx_err);
37354+ atomic_inc_unchecked(&vcc->stats->rx_err);
37355 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37356 length, skb->len);)
37357 atm_return(vcc, skb->truesize);
37358@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37359
37360 IF_RX(printk("rx_dle_intr: skb push");)
37361 vcc->push(vcc,skb);
37362- atomic_inc(&vcc->stats->rx);
37363+ atomic_inc_unchecked(&vcc->stats->rx);
37364 iadev->rx_pkt_cnt++;
37365 }
37366 INCR_DLE:
37367@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37368 {
37369 struct k_sonet_stats *stats;
37370 stats = &PRIV(_ia_dev[board])->sonet_stats;
37371- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37372- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37373- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37374- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37375- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37376- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37377- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37378- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37379- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37380+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37381+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37382+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37383+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37384+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37385+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37386+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37387+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37388+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37389 }
37390 ia_cmds.status = 0;
37391 break;
37392@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37393 if ((desc == 0) || (desc > iadev->num_tx_desc))
37394 {
37395 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37396- atomic_inc(&vcc->stats->tx);
37397+ atomic_inc_unchecked(&vcc->stats->tx);
37398 if (vcc->pop)
37399 vcc->pop(vcc, skb);
37400 else
37401@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37402 ATM_DESC(skb) = vcc->vci;
37403 skb_queue_tail(&iadev->tx_dma_q, skb);
37404
37405- atomic_inc(&vcc->stats->tx);
37406+ atomic_inc_unchecked(&vcc->stats->tx);
37407 iadev->tx_pkt_cnt++;
37408 /* Increment transaction counter */
37409 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37410
37411 #if 0
37412 /* add flow control logic */
37413- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37414+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37415 if (iavcc->vc_desc_cnt > 10) {
37416 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37417 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37418diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37419index fa7d701..1e404c7 100644
37420--- a/drivers/atm/lanai.c
37421+++ b/drivers/atm/lanai.c
37422@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37423 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37424 lanai_endtx(lanai, lvcc);
37425 lanai_free_skb(lvcc->tx.atmvcc, skb);
37426- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37427+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37428 }
37429
37430 /* Try to fill the buffer - don't call unless there is backlog */
37431@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37432 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37433 __net_timestamp(skb);
37434 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37435- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37436+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37437 out:
37438 lvcc->rx.buf.ptr = end;
37439 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37440@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37441 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37442 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37443 lanai->stats.service_rxnotaal5++;
37444- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37445+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37446 return 0;
37447 }
37448 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37449@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37450 int bytes;
37451 read_unlock(&vcc_sklist_lock);
37452 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37453- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37454+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37455 lvcc->stats.x.aal5.service_trash++;
37456 bytes = (SERVICE_GET_END(s) * 16) -
37457 (((unsigned long) lvcc->rx.buf.ptr) -
37458@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37459 }
37460 if (s & SERVICE_STREAM) {
37461 read_unlock(&vcc_sklist_lock);
37462- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37463+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37464 lvcc->stats.x.aal5.service_stream++;
37465 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37466 "PDU on VCI %d!\n", lanai->number, vci);
37467@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37468 return 0;
37469 }
37470 DPRINTK("got rx crc error on vci %d\n", vci);
37471- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37472+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37473 lvcc->stats.x.aal5.service_rxcrc++;
37474 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37475 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37476diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37477index 9587e95..b45c5cb 100644
37478--- a/drivers/atm/nicstar.c
37479+++ b/drivers/atm/nicstar.c
37480@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37481 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37482 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37483 card->index);
37484- atomic_inc(&vcc->stats->tx_err);
37485+ atomic_inc_unchecked(&vcc->stats->tx_err);
37486 dev_kfree_skb_any(skb);
37487 return -EINVAL;
37488 }
37489@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37490 if (!vc->tx) {
37491 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37492 card->index);
37493- atomic_inc(&vcc->stats->tx_err);
37494+ atomic_inc_unchecked(&vcc->stats->tx_err);
37495 dev_kfree_skb_any(skb);
37496 return -EINVAL;
37497 }
37498@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37499 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37500 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37501 card->index);
37502- atomic_inc(&vcc->stats->tx_err);
37503+ atomic_inc_unchecked(&vcc->stats->tx_err);
37504 dev_kfree_skb_any(skb);
37505 return -EINVAL;
37506 }
37507
37508 if (skb_shinfo(skb)->nr_frags != 0) {
37509 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37510- atomic_inc(&vcc->stats->tx_err);
37511+ atomic_inc_unchecked(&vcc->stats->tx_err);
37512 dev_kfree_skb_any(skb);
37513 return -EINVAL;
37514 }
37515@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37516 }
37517
37518 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37519- atomic_inc(&vcc->stats->tx_err);
37520+ atomic_inc_unchecked(&vcc->stats->tx_err);
37521 dev_kfree_skb_any(skb);
37522 return -EIO;
37523 }
37524- atomic_inc(&vcc->stats->tx);
37525+ atomic_inc_unchecked(&vcc->stats->tx);
37526
37527 return 0;
37528 }
37529@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37530 printk
37531 ("nicstar%d: Can't allocate buffers for aal0.\n",
37532 card->index);
37533- atomic_add(i, &vcc->stats->rx_drop);
37534+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37535 break;
37536 }
37537 if (!atm_charge(vcc, sb->truesize)) {
37538 RXPRINTK
37539 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37540 card->index);
37541- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37542+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37543 dev_kfree_skb_any(sb);
37544 break;
37545 }
37546@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37547 ATM_SKB(sb)->vcc = vcc;
37548 __net_timestamp(sb);
37549 vcc->push(vcc, sb);
37550- atomic_inc(&vcc->stats->rx);
37551+ atomic_inc_unchecked(&vcc->stats->rx);
37552 cell += ATM_CELL_PAYLOAD;
37553 }
37554
37555@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37556 if (iovb == NULL) {
37557 printk("nicstar%d: Out of iovec buffers.\n",
37558 card->index);
37559- atomic_inc(&vcc->stats->rx_drop);
37560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37561 recycle_rx_buf(card, skb);
37562 return;
37563 }
37564@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37565 small or large buffer itself. */
37566 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37567 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37568- atomic_inc(&vcc->stats->rx_err);
37569+ atomic_inc_unchecked(&vcc->stats->rx_err);
37570 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37571 NS_MAX_IOVECS);
37572 NS_PRV_IOVCNT(iovb) = 0;
37573@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37574 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37575 card->index);
37576 which_list(card, skb);
37577- atomic_inc(&vcc->stats->rx_err);
37578+ atomic_inc_unchecked(&vcc->stats->rx_err);
37579 recycle_rx_buf(card, skb);
37580 vc->rx_iov = NULL;
37581 recycle_iov_buf(card, iovb);
37582@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37583 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37584 card->index);
37585 which_list(card, skb);
37586- atomic_inc(&vcc->stats->rx_err);
37587+ atomic_inc_unchecked(&vcc->stats->rx_err);
37588 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37589 NS_PRV_IOVCNT(iovb));
37590 vc->rx_iov = NULL;
37591@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37592 printk(" - PDU size mismatch.\n");
37593 else
37594 printk(".\n");
37595- atomic_inc(&vcc->stats->rx_err);
37596+ atomic_inc_unchecked(&vcc->stats->rx_err);
37597 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37598 NS_PRV_IOVCNT(iovb));
37599 vc->rx_iov = NULL;
37600@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37601 /* skb points to a small buffer */
37602 if (!atm_charge(vcc, skb->truesize)) {
37603 push_rxbufs(card, skb);
37604- atomic_inc(&vcc->stats->rx_drop);
37605+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37606 } else {
37607 skb_put(skb, len);
37608 dequeue_sm_buf(card, skb);
37609@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37610 ATM_SKB(skb)->vcc = vcc;
37611 __net_timestamp(skb);
37612 vcc->push(vcc, skb);
37613- atomic_inc(&vcc->stats->rx);
37614+ atomic_inc_unchecked(&vcc->stats->rx);
37615 }
37616 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37617 struct sk_buff *sb;
37618@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37619 if (len <= NS_SMBUFSIZE) {
37620 if (!atm_charge(vcc, sb->truesize)) {
37621 push_rxbufs(card, sb);
37622- atomic_inc(&vcc->stats->rx_drop);
37623+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37624 } else {
37625 skb_put(sb, len);
37626 dequeue_sm_buf(card, sb);
37627@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37628 ATM_SKB(sb)->vcc = vcc;
37629 __net_timestamp(sb);
37630 vcc->push(vcc, sb);
37631- atomic_inc(&vcc->stats->rx);
37632+ atomic_inc_unchecked(&vcc->stats->rx);
37633 }
37634
37635 push_rxbufs(card, skb);
37636@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37637
37638 if (!atm_charge(vcc, skb->truesize)) {
37639 push_rxbufs(card, skb);
37640- atomic_inc(&vcc->stats->rx_drop);
37641+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37642 } else {
37643 dequeue_lg_buf(card, skb);
37644 #ifdef NS_USE_DESTRUCTORS
37645@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37646 ATM_SKB(skb)->vcc = vcc;
37647 __net_timestamp(skb);
37648 vcc->push(vcc, skb);
37649- atomic_inc(&vcc->stats->rx);
37650+ atomic_inc_unchecked(&vcc->stats->rx);
37651 }
37652
37653 push_rxbufs(card, sb);
37654@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37655 printk
37656 ("nicstar%d: Out of huge buffers.\n",
37657 card->index);
37658- atomic_inc(&vcc->stats->rx_drop);
37659+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37660 recycle_iovec_rx_bufs(card,
37661 (struct iovec *)
37662 iovb->data,
37663@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37664 card->hbpool.count++;
37665 } else
37666 dev_kfree_skb_any(hb);
37667- atomic_inc(&vcc->stats->rx_drop);
37668+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37669 } else {
37670 /* Copy the small buffer to the huge buffer */
37671 sb = (struct sk_buff *)iov->iov_base;
37672@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37673 #endif /* NS_USE_DESTRUCTORS */
37674 __net_timestamp(hb);
37675 vcc->push(vcc, hb);
37676- atomic_inc(&vcc->stats->rx);
37677+ atomic_inc_unchecked(&vcc->stats->rx);
37678 }
37679 }
37680
37681diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37682index e3fb496..d9646bf 100644
37683--- a/drivers/atm/solos-pci.c
37684+++ b/drivers/atm/solos-pci.c
37685@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
37686 }
37687 atm_charge(vcc, skb->truesize);
37688 vcc->push(vcc, skb);
37689- atomic_inc(&vcc->stats->rx);
37690+ atomic_inc_unchecked(&vcc->stats->rx);
37691 break;
37692
37693 case PKT_STATUS:
37694@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37695 vcc = SKB_CB(oldskb)->vcc;
37696
37697 if (vcc) {
37698- atomic_inc(&vcc->stats->tx);
37699+ atomic_inc_unchecked(&vcc->stats->tx);
37700 solos_pop(vcc, oldskb);
37701 } else {
37702 dev_kfree_skb_irq(oldskb);
37703diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37704index 0215934..ce9f5b1 100644
37705--- a/drivers/atm/suni.c
37706+++ b/drivers/atm/suni.c
37707@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37708
37709
37710 #define ADD_LIMITED(s,v) \
37711- atomic_add((v),&stats->s); \
37712- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37713+ atomic_add_unchecked((v),&stats->s); \
37714+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37715
37716
37717 static void suni_hz(unsigned long from_timer)
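Aside on ADD_LIMITED here (and its uPD98402.c twin below): the macro expands to two statements with an unguarded if, so it would misbehave inside an un-braced if/else at a call site; the conversion to unchecked atomics preserves that shape. A hygienic form, as a sketch only and not part of the patch, is the usual do/while(0) wrapper:

#define ADD_LIMITED(s, v) do {						\
	atomic_add_unchecked((v), &stats->s);				\
	if (atomic_read_unchecked(&stats->s) < 0)			\
		atomic_set_unchecked(&stats->s, INT_MAX);		\
} while (0)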
37718diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37719index 5120a96..e2572bd 100644
37720--- a/drivers/atm/uPD98402.c
37721+++ b/drivers/atm/uPD98402.c
37722@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37723 struct sonet_stats tmp;
37724 int error = 0;
37725
37726- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37727+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37728 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37729 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37730 if (zero && !error) {
37731@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37732
37733
37734 #define ADD_LIMITED(s,v) \
37735- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37736- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37737- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37738+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37739+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37740+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37741
37742
37743 static void stat_event(struct atm_dev *dev)
37744@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37745 if (reason & uPD98402_INT_PFM) stat_event(dev);
37746 if (reason & uPD98402_INT_PCO) {
37747 (void) GET(PCOCR); /* clear interrupt cause */
37748- atomic_add(GET(HECCT),
37749+ atomic_add_unchecked(GET(HECCT),
37750 &PRIV(dev)->sonet_stats.uncorr_hcs);
37751 }
37752 if ((reason & uPD98402_INT_RFO) &&
37753@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37754 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37755 uPD98402_INT_LOS),PIMR); /* enable them */
37756 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37757- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37758- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37759- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37760+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37761+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37762+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37763 return 0;
37764 }
37765
37766diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37767index 969c3c2..9b72956 100644
37768--- a/drivers/atm/zatm.c
37769+++ b/drivers/atm/zatm.c
37770@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37771 }
37772 if (!size) {
37773 dev_kfree_skb_irq(skb);
37774- if (vcc) atomic_inc(&vcc->stats->rx_err);
37775+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37776 continue;
37777 }
37778 if (!atm_charge(vcc,skb->truesize)) {
37779@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37780 skb->len = size;
37781 ATM_SKB(skb)->vcc = vcc;
37782 vcc->push(vcc,skb);
37783- atomic_inc(&vcc->stats->rx);
37784+ atomic_inc_unchecked(&vcc->stats->rx);
37785 }
37786 zout(pos & 0xffff,MTA(mbx));
37787 #if 0 /* probably a stupid idea */
37788@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37789 skb_queue_head(&zatm_vcc->backlog,skb);
37790 break;
37791 }
37792- atomic_inc(&vcc->stats->tx);
37793+ atomic_inc_unchecked(&vcc->stats->tx);
37794 wake_up(&zatm_vcc->tx_wait);
37795 }
37796
37797diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37798index 59dc808..f10c74e 100644
37799--- a/drivers/base/bus.c
37800+++ b/drivers/base/bus.c
37801@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37802 return -EINVAL;
37803
37804 mutex_lock(&subsys->p->mutex);
37805- list_add_tail(&sif->node, &subsys->p->interfaces);
37806+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37807 if (sif->add_dev) {
37808 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37809 while ((dev = subsys_dev_iter_next(&iter)))
37810@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37811 subsys = sif->subsys;
37812
37813 mutex_lock(&subsys->p->mutex);
37814- list_del_init(&sif->node);
37815+ pax_list_del_init((struct list_head *)&sif->node);
37816 if (sif->remove_dev) {
37817 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37818 while ((dev = subsys_dev_iter_next(&iter)))
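The pax_list_* substitutions above (and in syscore.c below) exist because these interface structures end up constified: their list_head nodes sit in objects the plugin has made read-only, so the plain list primitives would fault on write. A rough sketch of what such a wrapper does; the actual helpers live in the patched list code and keep the list-debug sanity checks:

void pax_list_add_tail(struct list_head *new, struct list_head *head)
{
	pax_open_kernel();	/* lift kernel write protection */
	__list_add(new, head->prev, head);
	pax_close_kernel();	/* restore it */
}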
37819diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37820index 25798db..15f130e 100644
37821--- a/drivers/base/devtmpfs.c
37822+++ b/drivers/base/devtmpfs.c
37823@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37824 if (!thread)
37825 return 0;
37826
37827- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37828+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37829 if (err)
37830 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37831 else
37832@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37833 *err = sys_unshare(CLONE_NEWNS);
37834 if (*err)
37835 goto out;
37836- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37837+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37838 if (*err)
37839 goto out;
37840- sys_chdir("/.."); /* will traverse into overmounted root */
37841- sys_chroot(".");
37842+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37843+ sys_chroot((char __force_user *)".");
37844 complete(&setup_done);
37845 while (1) {
37846 spin_lock(&req_lock);
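The __force_user casts here (and in drbd_int.h and the AGP ioctl paths below) satisfy the patch's stricter user/kernel pointer separation: devtmpfsd legitimately calls syscall bodies with kernel strings because it runs under KERNEL_DS, and the cast records that the annotation check is being overridden on purpose. __force_user is presumably the compiler.h shorthand:

#define __force_user	__force __user
#define __force_kernel	__force __kernel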
37847diff --git a/drivers/base/node.c b/drivers/base/node.c
37848index bc9f43b..29703b8 100644
37849--- a/drivers/base/node.c
37850+++ b/drivers/base/node.c
37851@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37852 struct node_attr {
37853 struct device_attribute attr;
37854 enum node_states state;
37855-};
37856+} __do_const;
37857
37858 static ssize_t show_node_state(struct device *dev,
37859 struct device_attribute *attr, char *buf)
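__do_const, first applied here to struct node_attr and again to the drbd command tables below, is the marker the constify gcc plugin keys on: a structure of function pointers tagged with it is treated as const tree-wide so its instances can live in .rodata. The likely definition is simply an attribute the plugin recognizes:

#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#else
#define __do_const
#endif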
37860diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37861index bfb8955..4ebff34 100644
37862--- a/drivers/base/power/domain.c
37863+++ b/drivers/base/power/domain.c
37864@@ -1809,9 +1809,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
37865
37866 if (dev->power.subsys_data->domain_data) {
37867 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
37868- gpd_data->ops = (struct gpd_dev_ops){ NULL };
37869+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
37870 if (clear_td)
37871- gpd_data->td = (struct gpd_timing_data){ 0 };
37872+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
37873
37874 if (--gpd_data->refcount == 0) {
37875 dev->power.subsys_data->domain_data = NULL;
37876@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37877 {
37878 struct cpuidle_driver *cpuidle_drv;
37879 struct gpd_cpu_data *cpu_data;
37880- struct cpuidle_state *idle_state;
37881+ cpuidle_state_no_const *idle_state;
37882 int ret = 0;
37883
37884 if (IS_ERR_OR_NULL(genpd) || state < 0)
37885@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37886 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37887 {
37888 struct gpd_cpu_data *cpu_data;
37889- struct cpuidle_state *idle_state;
37890+ cpuidle_state_no_const *idle_state;
37891 int ret = 0;
37892
37893 if (IS_ERR_OR_NULL(genpd))
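cpuidle_state_no_const is the matching escape hatch: when a type the plugin constifies has one legitimately writable use (these routines patch an idle state at runtime), a companion typedef opts that use back out. Plausibly declared as:

typedef struct cpuidle_state __no_const cpuidle_state_no_const;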
37894diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37895index 03e089a..0e9560c 100644
37896--- a/drivers/base/power/sysfs.c
37897+++ b/drivers/base/power/sysfs.c
37898@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37899 return -EIO;
37900 }
37901 }
37902- return sprintf(buf, p);
37903+ return sprintf(buf, "%s", p);
37904 }
37905
37906 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
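The sysfs change above is a classic format-string fix; the same pattern recurs in intel-rng.c and cdrom.c further down. p is driver-controlled text, and any '%' in it would be parsed as a conversion that reads nonexistent varargs. A short illustration with a hypothetical status string:

char buf[64];
const char *p = "suspend 50% done";	/* hypothetical value */
sprintf(buf, p);	/* "% d" is parsed as a conversion: undefined junk */
sprintf(buf, "%s", p);	/* copies the text verbatim, as in the fix above */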
37907diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37908index 2d56f41..8830f19 100644
37909--- a/drivers/base/power/wakeup.c
37910+++ b/drivers/base/power/wakeup.c
37911@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
37912 * They need to be modified together atomically, so it's better to use one
37913 * atomic variable to hold them both.
37914 */
37915-static atomic_t combined_event_count = ATOMIC_INIT(0);
37916+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37917
37918 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37919 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37920
37921 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37922 {
37923- unsigned int comb = atomic_read(&combined_event_count);
37924+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37925
37926 *cnt = (comb >> IN_PROGRESS_BITS);
37927 *inpr = comb & MAX_IN_PROGRESS;
37928@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37929 ws->start_prevent_time = ws->last_time;
37930
37931 /* Increment the counter of events in progress. */
37932- cec = atomic_inc_return(&combined_event_count);
37933+ cec = atomic_inc_return_unchecked(&combined_event_count);
37934
37935 trace_wakeup_source_activate(ws->name, cec);
37936 }
37937@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37938 * Increment the counter of registered wakeup events and decrement the
37939 * couter of wakeup events in progress simultaneously.
37940 */
37941- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37942+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37943 trace_wakeup_source_deactivate(ws->name, cec);
37944
37945 split_counters(&cnt, &inpr);
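Why combined_event_count can safely be unchecked: the value packs two halves (with a 32-bit int, 16 bits of in-progress count below 16 bits of total count) and wraps by design, so REFCOUNT's overflow trap would only false-positive here. A worked example of the packing arithmetic:

	comb = 0x00000001;	/* cnt = 0, inpr = 1: one event in flight */
	comb += 0x0000FFFF;	/* deactivate adds MAX_IN_PROGRESS,       */
				/* i.e. +0x10000 - 1                      */
	/* comb == 0x00010000:     cnt = 1, inpr = 0: registered, done   */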
37946diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37947index e8d11b6..7b1b36f 100644
37948--- a/drivers/base/syscore.c
37949+++ b/drivers/base/syscore.c
37950@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37951 void register_syscore_ops(struct syscore_ops *ops)
37952 {
37953 mutex_lock(&syscore_ops_lock);
37954- list_add_tail(&ops->node, &syscore_ops_list);
37955+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37956 mutex_unlock(&syscore_ops_lock);
37957 }
37958 EXPORT_SYMBOL_GPL(register_syscore_ops);
37959@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37960 void unregister_syscore_ops(struct syscore_ops *ops)
37961 {
37962 mutex_lock(&syscore_ops_lock);
37963- list_del(&ops->node);
37964+ pax_list_del((struct list_head *)&ops->node);
37965 mutex_unlock(&syscore_ops_lock);
37966 }
37967 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37968diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37969index 036e8ab..6221dec 100644
37970--- a/drivers/block/cciss.c
37971+++ b/drivers/block/cciss.c
37972@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
37973 while (!list_empty(&h->reqQ)) {
37974 c = list_entry(h->reqQ.next, CommandList_struct, list);
37975 /* can't do anything if fifo is full */
37976- if ((h->access.fifo_full(h))) {
37977+ if ((h->access->fifo_full(h))) {
37978 dev_warn(&h->pdev->dev, "fifo full\n");
37979 break;
37980 }
37981@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
37982 h->Qdepth--;
37983
37984 /* Tell the controller execute command */
37985- h->access.submit_command(h, c);
37986+ h->access->submit_command(h, c);
37987
37988 /* Put job onto the completed Q */
37989 addQ(&h->cmpQ, c);
37990@@ -3447,17 +3447,17 @@ startio:
37991
37992 static inline unsigned long get_next_completion(ctlr_info_t *h)
37993 {
37994- return h->access.command_completed(h);
37995+ return h->access->command_completed(h);
37996 }
37997
37998 static inline int interrupt_pending(ctlr_info_t *h)
37999 {
38000- return h->access.intr_pending(h);
38001+ return h->access->intr_pending(h);
38002 }
38003
38004 static inline long interrupt_not_for_us(ctlr_info_t *h)
38005 {
38006- return ((h->access.intr_pending(h) == 0) ||
38007+ return ((h->access->intr_pending(h) == 0) ||
38008 (h->interrupts_enabled == 0));
38009 }
38010
38011@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
38012 u32 a;
38013
38014 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38015- return h->access.command_completed(h);
38016+ return h->access->command_completed(h);
38017
38018 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38019 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38020@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38021 trans_support & CFGTBL_Trans_use_short_tags);
38022
38023 /* Change the access methods to the performant access methods */
38024- h->access = SA5_performant_access;
38025+ h->access = &SA5_performant_access;
38026 h->transMethod = CFGTBL_Trans_Performant;
38027
38028 return;
38029@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38030 if (prod_index < 0)
38031 return -ENODEV;
38032 h->product_name = products[prod_index].product_name;
38033- h->access = *(products[prod_index].access);
38034+ h->access = products[prod_index].access;
38035
38036 if (cciss_board_disabled(h)) {
38037 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38038@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
38039 }
38040
38041 /* make sure the board interrupts are off */
38042- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38043+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38044 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38045 if (rc)
38046 goto clean2;
38047@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
38048 * fake ones to scoop up any residual completions.
38049 */
38050 spin_lock_irqsave(&h->lock, flags);
38051- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38052+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38053 spin_unlock_irqrestore(&h->lock, flags);
38054 free_irq(h->intr[h->intr_mode], h);
38055 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38056@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
38057 dev_info(&h->pdev->dev, "Board READY.\n");
38058 dev_info(&h->pdev->dev,
38059 "Waiting for stale completions to drain.\n");
38060- h->access.set_intr_mask(h, CCISS_INTR_ON);
38061+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38062 msleep(10000);
38063- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38064+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38065
38066 rc = controller_reset_failed(h->cfgtable);
38067 if (rc)
38068@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
38069 cciss_scsi_setup(h);
38070
38071 /* Turn the interrupts on so we can service requests */
38072- h->access.set_intr_mask(h, CCISS_INTR_ON);
38073+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38074
38075 /* Get the firmware version */
38076 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38077@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38078 kfree(flush_buf);
38079 if (return_code != IO_OK)
38080 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38081- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38082+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38083 free_irq(h->intr[h->intr_mode], h);
38084 }
38085
38086diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38087index 7fda30e..2f27946 100644
38088--- a/drivers/block/cciss.h
38089+++ b/drivers/block/cciss.h
38090@@ -101,7 +101,7 @@ struct ctlr_info
38091 /* information about each logical volume */
38092 drive_info_struct *drv[CISS_MAX_LUN];
38093
38094- struct access_method access;
38095+ struct access_method *access;
38096
38097 /* queue and queue Info */
38098 struct list_head reqQ;
38099@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38100 }
38101
38102 static struct access_method SA5_access = {
38103- SA5_submit_command,
38104- SA5_intr_mask,
38105- SA5_fifo_full,
38106- SA5_intr_pending,
38107- SA5_completed,
38108+ .submit_command = SA5_submit_command,
38109+ .set_intr_mask = SA5_intr_mask,
38110+ .fifo_full = SA5_fifo_full,
38111+ .intr_pending = SA5_intr_pending,
38112+ .command_completed = SA5_completed,
38113 };
38114
38115 static struct access_method SA5B_access = {
38116- SA5_submit_command,
38117- SA5B_intr_mask,
38118- SA5_fifo_full,
38119- SA5B_intr_pending,
38120- SA5_completed,
38121+ .submit_command = SA5_submit_command,
38122+ .set_intr_mask = SA5B_intr_mask,
38123+ .fifo_full = SA5_fifo_full,
38124+ .intr_pending = SA5B_intr_pending,
38125+ .command_completed = SA5_completed,
38126 };
38127
38128 static struct access_method SA5_performant_access = {
38129- SA5_submit_command,
38130- SA5_performant_intr_mask,
38131- SA5_fifo_full,
38132- SA5_performant_intr_pending,
38133- SA5_performant_completed,
38134+ .submit_command = SA5_submit_command,
38135+ .set_intr_mask = SA5_performant_intr_mask,
38136+ .fifo_full = SA5_fifo_full,
38137+ .intr_pending = SA5_performant_intr_pending,
38138+ .command_completed = SA5_performant_completed,
38139 };
38140
38141 struct board_type {
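The cciss conversion above (mirrored in cpqarray and smart1,2.h below) is the standard constify refactor: rather than copying the selected method table into each controller, which forces struct ctlr_info to carry writable function pointers, the controller keeps a pointer to one shared table that can sit in .rodata. In miniature:

/* before: writable per-controller copy */
h->access = *(products[prod_index].access);
h->access.submit_command(h, c);

/* after: shared read-only table, one extra indirection */
h->access = products[prod_index].access;
h->access->submit_command(h, c);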
38142diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38143index 2b94403..fd6ad1f 100644
38144--- a/drivers/block/cpqarray.c
38145+++ b/drivers/block/cpqarray.c
38146@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38147 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38148 goto Enomem4;
38149 }
38150- hba[i]->access.set_intr_mask(hba[i], 0);
38151+ hba[i]->access->set_intr_mask(hba[i], 0);
38152 if (request_irq(hba[i]->intr, do_ida_intr,
38153 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38154 {
38155@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38156 add_timer(&hba[i]->timer);
38157
38158 /* Enable IRQ now that spinlock and rate limit timer are set up */
38159- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38160+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38161
38162 for(j=0; j<NWD; j++) {
38163 struct gendisk *disk = ida_gendisk[i][j];
38164@@ -694,7 +694,7 @@ DBGINFO(
38165 for(i=0; i<NR_PRODUCTS; i++) {
38166 if (board_id == products[i].board_id) {
38167 c->product_name = products[i].product_name;
38168- c->access = *(products[i].access);
38169+ c->access = products[i].access;
38170 break;
38171 }
38172 }
38173@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38174 hba[ctlr]->intr = intr;
38175 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38176 hba[ctlr]->product_name = products[j].product_name;
38177- hba[ctlr]->access = *(products[j].access);
38178+ hba[ctlr]->access = products[j].access;
38179 hba[ctlr]->ctlr = ctlr;
38180 hba[ctlr]->board_id = board_id;
38181 hba[ctlr]->pci_dev = NULL; /* not PCI */
38182@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38183
38184 while((c = h->reqQ) != NULL) {
38185 /* Can't do anything if we're busy */
38186- if (h->access.fifo_full(h) == 0)
38187+ if (h->access->fifo_full(h) == 0)
38188 return;
38189
38190 /* Get the first entry from the request Q */
38191@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38192 h->Qdepth--;
38193
38194 /* Tell the controller to do our bidding */
38195- h->access.submit_command(h, c);
38196+ h->access->submit_command(h, c);
38197
38198 /* Get onto the completion Q */
38199 addQ(&h->cmpQ, c);
38200@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38201 unsigned long flags;
38202 __u32 a,a1;
38203
38204- istat = h->access.intr_pending(h);
38205+ istat = h->access->intr_pending(h);
38206 /* Is this interrupt for us? */
38207 if (istat == 0)
38208 return IRQ_NONE;
38209@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38210 */
38211 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38212 if (istat & FIFO_NOT_EMPTY) {
38213- while((a = h->access.command_completed(h))) {
38214+ while((a = h->access->command_completed(h))) {
38215 a1 = a; a &= ~3;
38216 if ((c = h->cmpQ) == NULL)
38217 {
38218@@ -1448,11 +1448,11 @@ static int sendcmd(
38219 /*
38220 * Disable interrupt
38221 */
38222- info_p->access.set_intr_mask(info_p, 0);
38223+ info_p->access->set_intr_mask(info_p, 0);
38224 /* Make sure there is room in the command FIFO */
38225 /* Actually it should be completely empty at this time. */
38226 for (i = 200000; i > 0; i--) {
38227- temp = info_p->access.fifo_full(info_p);
38228+ temp = info_p->access->fifo_full(info_p);
38229 if (temp != 0) {
38230 break;
38231 }
38232@@ -1465,7 +1465,7 @@ DBG(
38233 /*
38234 * Send the cmd
38235 */
38236- info_p->access.submit_command(info_p, c);
38237+ info_p->access->submit_command(info_p, c);
38238 complete = pollcomplete(ctlr);
38239
38240 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38241@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38242 * we check the new geometry. Then turn interrupts back on when
38243 * we're done.
38244 */
38245- host->access.set_intr_mask(host, 0);
38246+ host->access->set_intr_mask(host, 0);
38247 getgeometry(ctlr);
38248- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38249+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38250
38251 for(i=0; i<NWD; i++) {
38252 struct gendisk *disk = ida_gendisk[ctlr][i];
38253@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38254 /* Wait (up to 2 seconds) for a command to complete */
38255
38256 for (i = 200000; i > 0; i--) {
38257- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38258+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38259 if (done == 0) {
38260 udelay(10); /* a short fixed delay */
38261 } else
38262diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38263index be73e9d..7fbf140 100644
38264--- a/drivers/block/cpqarray.h
38265+++ b/drivers/block/cpqarray.h
38266@@ -99,7 +99,7 @@ struct ctlr_info {
38267 drv_info_t drv[NWD];
38268 struct proc_dir_entry *proc;
38269
38270- struct access_method access;
38271+ struct access_method *access;
38272
38273 cmdlist_t *reqQ;
38274 cmdlist_t *cmpQ;
38275diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38276index 0e06f0c..c47b81d 100644
38277--- a/drivers/block/drbd/drbd_int.h
38278+++ b/drivers/block/drbd/drbd_int.h
38279@@ -582,7 +582,7 @@ struct drbd_epoch {
38280 struct drbd_tconn *tconn;
38281 struct list_head list;
38282 unsigned int barrier_nr;
38283- atomic_t epoch_size; /* increased on every request added. */
38284+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38285 atomic_t active; /* increased on every req. added, and dec on every finished. */
38286 unsigned long flags;
38287 };
38288@@ -1022,7 +1022,7 @@ struct drbd_conf {
38289 unsigned int al_tr_number;
38290 int al_tr_cycle;
38291 wait_queue_head_t seq_wait;
38292- atomic_t packet_seq;
38293+ atomic_unchecked_t packet_seq;
38294 unsigned int peer_seq;
38295 spinlock_t peer_seq_lock;
38296 unsigned int minor;
38297@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
38298 char __user *uoptval;
38299 int err;
38300
38301- uoptval = (char __user __force *)optval;
38302+ uoptval = (char __force_user *)optval;
38303
38304 set_fs(KERNEL_DS);
38305 if (level == SOL_SOCKET)
38306diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
38307index 89c497c..9c736ae 100644
38308--- a/drivers/block/drbd/drbd_interval.c
38309+++ b/drivers/block/drbd/drbd_interval.c
38310@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
38311 }
38312
38313 static const struct rb_augment_callbacks augment_callbacks = {
38314- augment_propagate,
38315- augment_copy,
38316- augment_rotate,
38317+ .propagate = augment_propagate,
38318+ .copy = augment_copy,
38319+ .rotate = augment_rotate,
38320 };
38321
38322 /**
38323diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38324index 929468e..7d934eb 100644
38325--- a/drivers/block/drbd/drbd_main.c
38326+++ b/drivers/block/drbd/drbd_main.c
38327@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
38328 p->sector = sector;
38329 p->block_id = block_id;
38330 p->blksize = blksize;
38331- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
38332+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
38333 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
38334 }
38335
38336@@ -1622,7 +1622,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
38337 return -EIO;
38338 p->sector = cpu_to_be64(req->i.sector);
38339 p->block_id = (unsigned long)req;
38340- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
38341+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
38342 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
38343 if (mdev->state.conn >= C_SYNC_SOURCE &&
38344 mdev->state.conn <= C_PAUSED_SYNC_T)
38345@@ -2577,8 +2577,8 @@ void conn_destroy(struct kref *kref)
38346 {
38347 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
38348
38349- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
38350- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
38351+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
38352+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
38353 kfree(tconn->current_epoch);
38354
38355 idr_destroy(&tconn->volumes);
38356diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38357index c706d50..5e1b472 100644
38358--- a/drivers/block/drbd/drbd_nl.c
38359+++ b/drivers/block/drbd/drbd_nl.c
38360@@ -3440,7 +3440,7 @@ out:
38361
38362 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
38363 {
38364- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38365+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38366 struct sk_buff *msg;
38367 struct drbd_genlmsghdr *d_out;
38368 unsigned seq;
38369@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
38370 return;
38371 }
38372
38373- seq = atomic_inc_return(&drbd_genl_seq);
38374+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38375 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38376 if (!msg)
38377 goto failed;
38378diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38379index d073305..4998fea 100644
38380--- a/drivers/block/drbd/drbd_receiver.c
38381+++ b/drivers/block/drbd/drbd_receiver.c
38382@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
38383 {
38384 int err;
38385
38386- atomic_set(&mdev->packet_seq, 0);
38387+ atomic_set_unchecked(&mdev->packet_seq, 0);
38388 mdev->peer_seq = 0;
38389
38390 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
38391@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
38392 do {
38393 next_epoch = NULL;
38394
38395- epoch_size = atomic_read(&epoch->epoch_size);
38396+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38397
38398 switch (ev & ~EV_CLEANUP) {
38399 case EV_PUT:
38400@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
38401 rv = FE_DESTROYED;
38402 } else {
38403 epoch->flags = 0;
38404- atomic_set(&epoch->epoch_size, 0);
38405+ atomic_set_unchecked(&epoch->epoch_size, 0);
38406 /* atomic_set(&epoch->active, 0); is already zero */
38407 if (rv == FE_STILL_LIVE)
38408 rv = FE_RECYCLED;
38409@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
38410 conn_wait_active_ee_empty(tconn);
38411 drbd_flush(tconn);
38412
38413- if (atomic_read(&tconn->current_epoch->epoch_size)) {
38414+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
38415 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38416 if (epoch)
38417 break;
38418@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
38419 }
38420
38421 epoch->flags = 0;
38422- atomic_set(&epoch->epoch_size, 0);
38423+ atomic_set_unchecked(&epoch->epoch_size, 0);
38424 atomic_set(&epoch->active, 0);
38425
38426 spin_lock(&tconn->epoch_lock);
38427- if (atomic_read(&tconn->current_epoch->epoch_size)) {
38428+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
38429 list_add(&epoch->list, &tconn->current_epoch->list);
38430 tconn->current_epoch = epoch;
38431 tconn->epochs++;
38432@@ -2164,7 +2164,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
38433
38434 err = wait_for_and_update_peer_seq(mdev, peer_seq);
38435 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
38436- atomic_inc(&tconn->current_epoch->epoch_size);
38437+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
38438 err2 = drbd_drain_block(mdev, pi->size);
38439 if (!err)
38440 err = err2;
38441@@ -2198,7 +2198,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
38442
38443 spin_lock(&tconn->epoch_lock);
38444 peer_req->epoch = tconn->current_epoch;
38445- atomic_inc(&peer_req->epoch->epoch_size);
38446+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38447 atomic_inc(&peer_req->epoch->active);
38448 spin_unlock(&tconn->epoch_lock);
38449
38450@@ -4345,7 +4345,7 @@ struct data_cmd {
38451 int expect_payload;
38452 size_t pkt_size;
38453 int (*fn)(struct drbd_tconn *, struct packet_info *);
38454-};
38455+} __do_const;
38456
38457 static struct data_cmd drbd_cmd_handler[] = {
38458 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38459@@ -4465,7 +4465,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
38460 if (!list_empty(&tconn->current_epoch->list))
38461 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
38462 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38463- atomic_set(&tconn->current_epoch->epoch_size, 0);
38464+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
38465 tconn->send.seen_any_write_yet = false;
38466
38467 conn_info(tconn, "Connection closed\n");
38468@@ -5221,7 +5221,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
38469 struct asender_cmd {
38470 size_t pkt_size;
38471 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
38472-};
38473+} __do_const;
38474
38475 static struct asender_cmd asender_tbl[] = {
38476 [P_PING] = { 0, got_Ping },
38477diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38478index 66e8c3b..9b68dd9 100644
38479--- a/drivers/block/loop.c
38480+++ b/drivers/block/loop.c
38481@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38482
38483 file_start_write(file);
38484 set_fs(get_ds());
38485- bw = file->f_op->write(file, buf, len, &pos);
38486+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38487 set_fs(old_fs);
38488 file_end_write(file);
38489 if (likely(bw == len))
38490diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
38491index 091b9ea..f5428f8 100644
38492--- a/drivers/block/null_blk.c
38493+++ b/drivers/block/null_blk.c
38494@@ -382,15 +382,25 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
38495 return 0;
38496 }
38497
38498-static struct blk_mq_ops null_mq_ops = {
38499- .queue_rq = null_queue_rq,
38500- .map_queue = blk_mq_map_queue,
38501+static struct blk_mq_ops null_mq_single_ops = {
38502+ .queue_rq = null_queue_rq,
38503+ .map_queue = blk_mq_map_queue,
38504 .init_hctx = null_init_hctx,
38505 .complete = null_softirq_done_fn,
38506+ .alloc_hctx = blk_mq_alloc_single_hw_queue,
38507+ .free_hctx = blk_mq_free_single_hw_queue,
38508+};
38509+
38510+static struct blk_mq_ops null_mq_per_node_ops = {
38511+ .queue_rq = null_queue_rq,
38512+ .map_queue = blk_mq_map_queue,
38513+ .init_hctx = null_init_hctx,
38514+ .alloc_hctx = null_alloc_hctx,
38515+ .free_hctx = null_free_hctx,
38516 };
38517
38518 static struct blk_mq_reg null_mq_reg = {
38519- .ops = &null_mq_ops,
38520+ .ops = &null_mq_single_ops,
38521 .queue_depth = 64,
38522 .cmd_size = sizeof(struct nullb_cmd),
38523 .flags = BLK_MQ_F_SHOULD_MERGE,
38524@@ -521,13 +531,8 @@ static int null_add_dev(void)
38525 null_mq_reg.queue_depth = hw_queue_depth;
38526 null_mq_reg.nr_hw_queues = submit_queues;
38527
38528- if (use_per_node_hctx) {
38529- null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
38530- null_mq_reg.ops->free_hctx = null_free_hctx;
38531- } else {
38532- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
38533- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
38534- }
38535+ if (use_per_node_hctx)
38536+ null_mq_reg.ops = &null_mq_per_node_ops;
38537
38538 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
38539 } else if (queue_mode == NULL_Q_BIO) {
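The null_blk rework follows from the same constification: once blk_mq_ops is read-only, the old runtime writes to null_mq_reg.ops->alloc_hctx would fault. The two configurations therefore become two static tables, and the runtime choice collapses to selecting a pointer, equivalently:

null_mq_reg.ops = use_per_node_hctx ? &null_mq_per_node_ops
				    : &null_mq_single_ops;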
38540diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38541index a2af73d..c0b8f61 100644
38542--- a/drivers/block/pktcdvd.c
38543+++ b/drivers/block/pktcdvd.c
38544@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38545
38546 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38547 {
38548- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38549+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38550 }
38551
38552 /*
38553@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38554 return -EROFS;
38555 }
38556 pd->settings.fp = ti.fp;
38557- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38558+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38559
38560 if (ti.nwa_v) {
38561 pd->nwa = be32_to_cpu(ti.next_writable);
38562diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38563index e5565fb..71be10b4 100644
38564--- a/drivers/block/smart1,2.h
38565+++ b/drivers/block/smart1,2.h
38566@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38567 }
38568
38569 static struct access_method smart4_access = {
38570- smart4_submit_command,
38571- smart4_intr_mask,
38572- smart4_fifo_full,
38573- smart4_intr_pending,
38574- smart4_completed,
38575+ .submit_command = smart4_submit_command,
38576+ .set_intr_mask = smart4_intr_mask,
38577+ .fifo_full = smart4_fifo_full,
38578+ .intr_pending = smart4_intr_pending,
38579+ .command_completed = smart4_completed,
38580 };
38581
38582 /*
38583@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38584 }
38585
38586 static struct access_method smart2_access = {
38587- smart2_submit_command,
38588- smart2_intr_mask,
38589- smart2_fifo_full,
38590- smart2_intr_pending,
38591- smart2_completed,
38592+ .submit_command = smart2_submit_command,
38593+ .set_intr_mask = smart2_intr_mask,
38594+ .fifo_full = smart2_fifo_full,
38595+ .intr_pending = smart2_intr_pending,
38596+ .command_completed = smart2_completed,
38597 };
38598
38599 /*
38600@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38601 }
38602
38603 static struct access_method smart2e_access = {
38604- smart2e_submit_command,
38605- smart2e_intr_mask,
38606- smart2e_fifo_full,
38607- smart2e_intr_pending,
38608- smart2e_completed,
38609+ .submit_command = smart2e_submit_command,
38610+ .set_intr_mask = smart2e_intr_mask,
38611+ .fifo_full = smart2e_fifo_full,
38612+ .intr_pending = smart2e_intr_pending,
38613+ .command_completed = smart2e_completed,
38614 };
38615
38616 /*
38617@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38618 }
38619
38620 static struct access_method smart1_access = {
38621- smart1_submit_command,
38622- smart1_intr_mask,
38623- smart1_fifo_full,
38624- smart1_intr_pending,
38625- smart1_completed,
38626+ .submit_command = smart1_submit_command,
38627+ .set_intr_mask = smart1_intr_mask,
38628+ .fifo_full = smart1_fifo_full,
38629+ .intr_pending = smart1_intr_pending,
38630+ .command_completed = smart1_completed,
38631 };
38632diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38633index f038dba..bb74c08 100644
38634--- a/drivers/bluetooth/btwilink.c
38635+++ b/drivers/bluetooth/btwilink.c
38636@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38637
38638 static int bt_ti_probe(struct platform_device *pdev)
38639 {
38640- static struct ti_st *hst;
38641+ struct ti_st *hst;
38642 struct hci_dev *hdev;
38643 int err;
38644
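The btwilink change is a plain bug fix carried in the patch: as a function-local static, hst was shared by every probe, so two devices probing would clobber each other's state. As an ordinary local it is per-invocation, matching how the function allocates it, roughly:

struct ti_st *hst = kzalloc(sizeof(*hst), GFP_KERNEL);	/* per device */
if (!hst)
	return -ENOMEM;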
38645diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38646index 8a3aff7..d7538c2 100644
38647--- a/drivers/cdrom/cdrom.c
38648+++ b/drivers/cdrom/cdrom.c
38649@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38650 ENSURE(reset, CDC_RESET);
38651 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38652 cdi->mc_flags = 0;
38653- cdo->n_minors = 0;
38654 cdi->options = CDO_USE_FFLAGS;
38655
38656 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
38657@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38658 else
38659 cdi->cdda_method = CDDA_OLD;
38660
38661- if (!cdo->generic_packet)
38662- cdo->generic_packet = cdrom_dummy_generic_packet;
38663+ if (!cdo->generic_packet) {
38664+ pax_open_kernel();
38665+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38666+ pax_close_kernel();
38667+ }
38668
38669 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38670 mutex_lock(&cdrom_mutex);
38671@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38672 if (cdi->exit)
38673 cdi->exit(cdi);
38674
38675- cdi->ops->n_minors--;
38676 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38677 }
38678
38679@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38680 */
38681 nr = nframes;
38682 do {
38683- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38684+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38685 if (cgc.buffer)
38686 break;
38687
38688@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38689 struct cdrom_device_info *cdi;
38690 int ret;
38691
38692- ret = scnprintf(info + *pos, max_size - *pos, header);
38693+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38694 if (!ret)
38695 return 1;
38696
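The register_cdrom hunk stores a default handler into what is now a read-only ops structure, hence the pax_open_kernel()/pax_close_kernel() bracket around the single write (the *(void **)& cast defeats the const qualifier). Conceptually, on x86 the pair toggles CR0.WP; a sketch rather than the literal implementation:

unsigned long cr0 = read_cr0();
write_cr0(cr0 & ~X86_CR0_WP);	/* permit stores to read-only pages */
*(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
write_cr0(cr0);			/* restore write protection */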
38697diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38698index 51e75ad..39c4c76 100644
38699--- a/drivers/cdrom/gdrom.c
38700+++ b/drivers/cdrom/gdrom.c
38701@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38702 .audio_ioctl = gdrom_audio_ioctl,
38703 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38704 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38705- .n_minors = 1,
38706 };
38707
38708 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38709diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38710index 1386749..5430258 100644
38711--- a/drivers/char/Kconfig
38712+++ b/drivers/char/Kconfig
38713@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38714
38715 config DEVKMEM
38716 bool "/dev/kmem virtual device support"
38717- default y
38718+ default n
38719+ depends on !GRKERNSEC_KMEM
38720 help
38721 Say Y here if you want to support the /dev/kmem device. The
38722 /dev/kmem device is rarely used, but can be used for certain
38723@@ -577,6 +578,7 @@ config DEVPORT
38724 bool
38725 depends on !M68K
38726 depends on ISA || PCI
38727+ depends on !GRKERNSEC_KMEM
38728 default y
38729
38730 source "drivers/s390/char/Kconfig"
38731diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38732index a48e05b..6bac831 100644
38733--- a/drivers/char/agp/compat_ioctl.c
38734+++ b/drivers/char/agp/compat_ioctl.c
38735@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38736 return -ENOMEM;
38737 }
38738
38739- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38740+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38741 sizeof(*usegment) * ureserve.seg_count)) {
38742 kfree(usegment);
38743 kfree(ksegment);
38744diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38745index 1b19239..963967b 100644
38746--- a/drivers/char/agp/frontend.c
38747+++ b/drivers/char/agp/frontend.c
38748@@ -731,6 +731,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
38749
38750 agp_copy_info(agp_bridge, &kerninfo);
38751
38752+ memset(&userinfo, 0, sizeof(userinfo));
38753 userinfo.version.major = kerninfo.version.major;
38754 userinfo.version.minor = kerninfo.version.minor;
38755 userinfo.bridge_id = kerninfo.device->vendor |
38756@@ -819,7 +820,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38757 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38758 return -EFAULT;
38759
38760- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38761+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38762 return -EFAULT;
38763
38764 client = agp_find_client_by_pid(reserve.pid);
38765@@ -849,7 +850,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38766 if (segment == NULL)
38767 return -ENOMEM;
38768
38769- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38770+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38771 sizeof(struct agp_segment) * reserve.seg_count)) {
38772 kfree(segment);
38773 return -EFAULT;
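The agpioc_reserve_wrap() change above tightens an integer-overflow guard: seg_count later sizes an allocation of struct agp_segment_priv elements (hence the new divisor), so the bound must use the size of the type actually being allocated, or count * size can wrap to a small value and under-allocate. The memset() of userinfo in the same file is a distinct fix, zeroing struct padding that would otherwise reach userspace uninitialized. The generic shape of the overflow guard, sketched in userspace (calloc() and the kernel's kcalloc() perform this check internally):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* refuse any count that would make count * elem wrap around */
static void *alloc_array(size_t count, size_t elem)
{
	if (elem != 0 && count > SIZE_MAX / elem)
		return NULL;
	return malloc(count * elem);
}

int main(void)
{
	struct seg { uint32_t pg_start, pg_count; } *v;

	v = alloc_array(4, sizeof(*v));             /* fine */
	printf("small: %p\n", (void *)v);
	free(v);

	v = alloc_array(SIZE_MAX / 2, sizeof(*v));  /* rejected, not wrapped */
	printf("huge:  %p\n", (void *)v);
	return 0;
}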
38774diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38775index 4f94375..413694e 100644
38776--- a/drivers/char/genrtc.c
38777+++ b/drivers/char/genrtc.c
38778@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38779 switch (cmd) {
38780
38781 case RTC_PLL_GET:
38782+ memset(&pll, 0, sizeof(pll));
38783 if (get_rtc_pll(&pll))
38784 return -EINVAL;
38785 else
38786diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38787index d5d4cd8..22d561d 100644
38788--- a/drivers/char/hpet.c
38789+++ b/drivers/char/hpet.c
38790@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38791 }
38792
38793 static int
38794-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38795+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38796 struct hpet_info *info)
38797 {
38798 struct hpet_timer __iomem *timer;
38799diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
38800index 86fe45c..c0ea948 100644
38801--- a/drivers/char/hw_random/intel-rng.c
38802+++ b/drivers/char/hw_random/intel-rng.c
38803@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
38804
38805 if (no_fwh_detect)
38806 return -ENODEV;
38807- printk(warning);
38808+ printk("%s", warning);
38809 return -EBUSY;
38810 }
38811
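This printk() change and the scnprintf() change in cdrom.c above close the same bug class: a string variable passed in the format position is interpreted, so any '%' conversion inside it consumes arguments that were never supplied (and %n historically allowed memory writes). Routing it through a constant "%s" format is the standard fix; gcc's -Wformat-security warns about the dangerous form. A small userspace demonstration:

#include <stdio.h>

int main(void)
{
	/* imagine this arrived from hardware, a log, or another module */
	const char *msg = "budget used: 100%s";

	printf("%s\n", msg);   /* safe: msg is data, printed verbatim */
	/*
	 * printf(msg);        undefined behavior: the "%s" embedded in msg
	 *                     reads a nonexistent argument off the stack
	 */
	return 0;
}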
38812diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38813index ec4e10f..f2a763b 100644
38814--- a/drivers/char/ipmi/ipmi_msghandler.c
38815+++ b/drivers/char/ipmi/ipmi_msghandler.c
38816@@ -420,7 +420,7 @@ struct ipmi_smi {
38817 struct proc_dir_entry *proc_dir;
38818 char proc_dir_name[10];
38819
38820- atomic_t stats[IPMI_NUM_STATS];
38821+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38822
38823 /*
38824 * run_to_completion duplicate of smb_info, smi_info
38825@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
38826
38827
38828 #define ipmi_inc_stat(intf, stat) \
38829- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38830+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38831 #define ipmi_get_stat(intf, stat) \
38832- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38833+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38834
38835 static int is_lan_addr(struct ipmi_addr *addr)
38836 {
38837@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38838 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38839 init_waitqueue_head(&intf->waitq);
38840 for (i = 0; i < IPMI_NUM_STATS; i++)
38841- atomic_set(&intf->stats[i], 0);
38842+ atomic_set_unchecked(&intf->stats[i], 0);
38843
38844 intf->proc_dir = NULL;
38845
38846diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38847index 8b4fa2c..5f81848 100644
38848--- a/drivers/char/ipmi/ipmi_si_intf.c
38849+++ b/drivers/char/ipmi/ipmi_si_intf.c
38850@@ -283,7 +283,7 @@ struct smi_info {
38851 unsigned char slave_addr;
38852
38853 /* Counters and things for the proc filesystem. */
38854- atomic_t stats[SI_NUM_STATS];
38855+ atomic_unchecked_t stats[SI_NUM_STATS];
38856
38857 struct task_struct *thread;
38858
38859@@ -292,9 +292,9 @@ struct smi_info {
38860 };
38861
38862 #define smi_inc_stat(smi, stat) \
38863- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38864+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38865 #define smi_get_stat(smi, stat) \
38866- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38867+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38868
38869 #define SI_MAX_PARMS 4
38870
38871@@ -3349,7 +3349,7 @@ static int try_smi_init(struct smi_info *new_smi)
38872 atomic_set(&new_smi->req_events, 0);
38873 new_smi->run_to_completion = 0;
38874 for (i = 0; i < SI_NUM_STATS; i++)
38875- atomic_set(&new_smi->stats[i], 0);
38876+ atomic_set_unchecked(&new_smi->stats[i], 0);
38877
38878 new_smi->interrupt_disabled = 1;
38879 atomic_set(&new_smi->stop_operation, 0);
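The stats arrays in both IPMI files move from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT hardening this patch carries, atomic_t arithmetic is instrumented to trap on overflow, since an overflowed reference count is a classic use-after-free primitive; statistics counters may wrap harmlessly, so they are switched to the uninstrumented type rather than generating false positives. A userspace C11 analog of the two flavors (the abort() stands in for the trap):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int  refcount = 1;   /* overflow here would be a bug */
static atomic_uint packets;        /* wrapping here is harmless */

static void ref_get(void)
{
	/* "checked" flavor: detect overflow (PaX traps the task instead) */
	if (atomic_fetch_add(&refcount, 1) == INT_MAX)
		abort();
}

static void stat_inc(void)
{
	/* "unchecked" flavor: plain increment, wraparound accepted */
	atomic_fetch_add(&packets, 1);
}

int main(void)
{
	ref_get();
	stat_inc();
	printf("refs=%d packets=%u\n",
	       atomic_load(&refcount), atomic_load(&packets));
	return 0;
}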
38880diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38881index 92c5937..1be4e4d 100644
38882--- a/drivers/char/mem.c
38883+++ b/drivers/char/mem.c
38884@@ -18,6 +18,7 @@
38885 #include <linux/raw.h>
38886 #include <linux/tty.h>
38887 #include <linux/capability.h>
38888+#include <linux/security.h>
38889 #include <linux/ptrace.h>
38890 #include <linux/device.h>
38891 #include <linux/highmem.h>
38892@@ -36,6 +37,10 @@
38893
38894 #define DEVPORT_MINOR 4
38895
38896+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38897+extern const struct file_operations grsec_fops;
38898+#endif
38899+
38900 static inline unsigned long size_inside_page(unsigned long start,
38901 unsigned long size)
38902 {
38903@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38904
38905 while (cursor < to) {
38906 if (!devmem_is_allowed(pfn)) {
38907+#ifdef CONFIG_GRKERNSEC_KMEM
38908+ gr_handle_mem_readwrite(from, to);
38909+#else
38910 printk(KERN_INFO
38911 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38912 current->comm, from, to);
38913+#endif
38914 return 0;
38915 }
38916 cursor += PAGE_SIZE;
38917@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38918 }
38919 return 1;
38920 }
38921+#elif defined(CONFIG_GRKERNSEC_KMEM)
38922+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38923+{
38924+ return 0;
38925+}
38926 #else
38927 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38928 {
38929@@ -119,6 +133,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38930
38931 while (count > 0) {
38932 unsigned long remaining;
38933+ char *temp;
38934
38935 sz = size_inside_page(p, count);
38936
38937@@ -134,7 +149,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38938 if (!ptr)
38939 return -EFAULT;
38940
38941- remaining = copy_to_user(buf, ptr, sz);
38942+#ifdef CONFIG_PAX_USERCOPY
38943+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38944+ if (!temp) {
38945+ unxlate_dev_mem_ptr(p, ptr);
38946+ return -ENOMEM;
38947+ }
38948+ memcpy(temp, ptr, sz);
38949+#else
38950+ temp = ptr;
38951+#endif
38952+
38953+ remaining = copy_to_user(buf, temp, sz);
38954+
38955+#ifdef CONFIG_PAX_USERCOPY
38956+ kfree(temp);
38957+#endif
38958+
38959 unxlate_dev_mem_ptr(p, ptr);
38960 if (remaining)
38961 return -EFAULT;
38962@@ -363,9 +394,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38963 size_t count, loff_t *ppos)
38964 {
38965 unsigned long p = *ppos;
38966- ssize_t low_count, read, sz;
38967+ ssize_t low_count, read, sz, err = 0;
38968 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38969- int err = 0;
38970
38971 read = 0;
38972 if (p < (unsigned long) high_memory) {
38973@@ -387,6 +417,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38974 }
38975 #endif
38976 while (low_count > 0) {
38977+ char *temp;
38978+
38979 sz = size_inside_page(p, low_count);
38980
38981 /*
38982@@ -396,7 +428,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38983 */
38984 kbuf = xlate_dev_kmem_ptr((char *)p);
38985
38986- if (copy_to_user(buf, kbuf, sz))
38987+#ifdef CONFIG_PAX_USERCOPY
38988+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38989+ if (!temp)
38990+ return -ENOMEM;
38991+ memcpy(temp, kbuf, sz);
38992+#else
38993+ temp = kbuf;
38994+#endif
38995+
38996+ err = copy_to_user(buf, temp, sz);
38997+
38998+#ifdef CONFIG_PAX_USERCOPY
38999+ kfree(temp);
39000+#endif
39001+
39002+ if (err)
39003 return -EFAULT;
39004 buf += sz;
39005 p += sz;
39006@@ -821,6 +868,9 @@ static const struct memdev {
39007 #ifdef CONFIG_PRINTK
39008 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
39009 #endif
39010+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39011+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
39012+#endif
39013 };
39014
39015 static int memory_open(struct inode *inode, struct file *filp)
39016@@ -892,7 +942,7 @@ static int __init chr_dev_init(void)
39017 continue;
39018
39019 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39020- NULL, devlist[minor].name);
39021+ NULL, "%s", devlist[minor].name);
39022 }
39023
39024 return tty_init();
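The read_mem()/read_kmem() hunks above stage data through kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) before copy_to_user(). Under CONFIG_PAX_USERCOPY every user copy is checked against the size of the source object, and a raw /dev/mem or kmem mapping carries no object metadata to check, so bouncing through a heap buffer of exactly the requested size gives the checker a verifiable bound, at the cost of one extra memcpy(). A toy model of the idea; the "checker" here is a stand-in invented for the sketch, not any real API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for the usercopy checker: it only knows one heap object */
static const void *tracked;
static size_t tracked_size;

static int checked_copy(void *dst, const void *src, size_t n)
{
	if (src != tracked || n > tracked_size) {
		fprintf(stderr, "usercopy: rejected %zu-byte copy\n", n);
		return -1;
	}
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char raw[4096] = "device memory";   /* no object metadata */
	char user[64];
	size_t sz = sizeof("device memory");

	(void)checked_copy(user, raw, sz);  /* rejected: unknown object */

	char *bounce = malloc(sz);          /* exactly sz bytes... */
	if (!bounce)
		return 1;
	tracked = bounce;                   /* ...known to the allocator */
	tracked_size = sz;
	memcpy(bounce, raw, sz);
	if (checked_copy(user, bounce, sz) == 0)
		printf("copied: %s\n", user);
	free(bounce);
	return 0;
}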
39025diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39026index 9df78e2..01ba9ae 100644
39027--- a/drivers/char/nvram.c
39028+++ b/drivers/char/nvram.c
39029@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39030
39031 spin_unlock_irq(&rtc_lock);
39032
39033- if (copy_to_user(buf, contents, tmp - contents))
39034+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39035 return -EFAULT;
39036
39037 *ppos = i;
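The nvram_read() change, like the extract_entropy_user() check in random.c further down, bounds a cursor over a fixed stack buffer before handing it to copy_to_user(): if the arithmetic producing tmp ever goes wrong, the read now fails cleanly with -EFAULT instead of leaking the adjacent stack. The shape of the check in a userspace sketch (names are illustrative):

#include <stdio.h>
#include <string.h>

/* 'n' was computed elsewhere from cursor arithmetic; distrust it */
static int copy_out(char *dst, const char *buf, size_t n, size_t bufsz)
{
	if (n > bufsz)
		return -1;              /* the kernel returns -EFAULT here */
	memcpy(dst, buf, n);
	return (int)n;
}

int main(void)
{
	char contents[128] = "nvram bytes";
	char user[128];

	printf("sane length:  %d\n",
	       copy_out(user, contents, 11, sizeof(contents)));
	printf("bogus length: %d\n",
	       copy_out(user, contents, 4096, sizeof(contents)));
	return 0;
}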
39038diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39039index 8320abd..ec48108 100644
39040--- a/drivers/char/pcmcia/synclink_cs.c
39041+++ b/drivers/char/pcmcia/synclink_cs.c
39042@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39043
39044 if (debug_level >= DEBUG_LEVEL_INFO)
39045 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39046- __FILE__, __LINE__, info->device_name, port->count);
39047+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39048
39049- WARN_ON(!port->count);
39050+ WARN_ON(!atomic_read(&port->count));
39051
39052 if (tty_port_close_start(port, tty, filp) == 0)
39053 goto cleanup;
39054@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39055 cleanup:
39056 if (debug_level >= DEBUG_LEVEL_INFO)
39057 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39058- tty->driver->name, port->count);
39059+ tty->driver->name, atomic_read(&port->count));
39060 }
39061
39062 /* Wait until the transmitter is empty.
39063@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39064
39065 if (debug_level >= DEBUG_LEVEL_INFO)
39066 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39067- __FILE__, __LINE__, tty->driver->name, port->count);
39068+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39069
39070 /* If port is closing, signal caller to try again */
39071 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
39072@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39073 goto cleanup;
39074 }
39075 spin_lock(&port->lock);
39076- port->count++;
39077+ atomic_inc(&port->count);
39078 spin_unlock(&port->lock);
39079 spin_unlock_irqrestore(&info->netlock, flags);
39080
39081- if (port->count == 1) {
39082+ if (atomic_read(&port->count) == 1) {
39083 /* 1st open on this device, init hardware */
39084 retval = startup(info, tty);
39085 if (retval < 0)
39086@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39087 unsigned short new_crctype;
39088
39089 /* return error if TTY interface open */
39090- if (info->port.count)
39091+ if (atomic_read(&info->port.count))
39092 return -EBUSY;
39093
39094 switch (encoding)
39095@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
39096
39097 /* arbitrate between network and tty opens */
39098 spin_lock_irqsave(&info->netlock, flags);
39099- if (info->port.count != 0 || info->netcount != 0) {
39100+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39101 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39102 spin_unlock_irqrestore(&info->netlock, flags);
39103 return -EBUSY;
39104@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39105 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39106
39107 /* return error if TTY interface open */
39108- if (info->port.count)
39109+ if (atomic_read(&info->port.count))
39110 return -EBUSY;
39111
39112 if (cmd != SIOCWANDEV)
39113diff --git a/drivers/char/random.c b/drivers/char/random.c
39114index 429b75b..a7f4145 100644
39115--- a/drivers/char/random.c
39116+++ b/drivers/char/random.c
39117@@ -270,10 +270,17 @@
39118 /*
39119 * Configuration information
39120 */
39121+#ifdef CONFIG_GRKERNSEC_RANDNET
39122+#define INPUT_POOL_SHIFT 14
39123+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
39124+#define OUTPUT_POOL_SHIFT 12
39125+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
39126+#else
39127 #define INPUT_POOL_SHIFT 12
39128 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
39129 #define OUTPUT_POOL_SHIFT 10
39130 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
39131+#endif
39132 #define SEC_XFER_SIZE 512
39133 #define EXTRACT_SIZE 10
39134
39135@@ -284,9 +291,6 @@
39136 /*
39137 * To allow fractional bits to be tracked, the entropy_count field is
39138 * denominated in units of 1/8th bits.
39139- *
39140- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39141- * credit_entropy_bits() needs to be 64 bits wide.
39142 */
39143 #define ENTROPY_SHIFT 3
39144 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39145@@ -361,12 +365,19 @@ static struct poolinfo {
39146 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
39147 int tap1, tap2, tap3, tap4, tap5;
39148 } poolinfo_table[] = {
39149+#ifdef CONFIG_GRKERNSEC_RANDNET
39150+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
39151+ { S(512), 411, 308, 208, 104, 1 },
39152+ /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
39153+ { S(128), 104, 76, 51, 25, 1 },
39154+#else
39155 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
39156 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
39157 { S(128), 104, 76, 51, 25, 1 },
39158 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
39159 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
39160 { S(32), 26, 19, 14, 7, 1 },
39161+#endif
39162 #if 0
39163 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
39164 { S(2048), 1638, 1231, 819, 411, 1 },
39165@@ -433,9 +444,9 @@ struct entropy_store {
39166 };
39167
39168 static void push_to_pool(struct work_struct *work);
39169-static __u32 input_pool_data[INPUT_POOL_WORDS];
39170-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39171-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39172+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39173+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39174+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39175
39176 static struct entropy_store input_pool = {
39177 .poolinfo = &poolinfo_table[0],
39178@@ -524,8 +535,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
39179 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
39180 }
39181
39182- ACCESS_ONCE(r->input_rotate) = input_rotate;
39183- ACCESS_ONCE(r->add_ptr) = i;
39184+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
39185+ ACCESS_ONCE_RW(r->add_ptr) = i;
39186 smp_wmb();
39187
39188 if (out)
39189@@ -632,7 +643,7 @@ retry:
39190 /* The +2 corresponds to the /4 in the denominator */
39191
39192 do {
39193- unsigned int anfrac = min(pnfrac, pool_size/2);
39194+ u64 anfrac = min(pnfrac, pool_size/2);
39195 unsigned int add =
39196 ((pool_size - entropy_count)*anfrac*3) >> s;
39197
39198@@ -1151,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39199
39200 extract_buf(r, tmp);
39201 i = min_t(int, nbytes, EXTRACT_SIZE);
39202- if (copy_to_user(buf, tmp, i)) {
39203+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39204 ret = -EFAULT;
39205 break;
39206 }
39207@@ -1507,7 +1518,7 @@ EXPORT_SYMBOL(generate_random_uuid);
39208 #include <linux/sysctl.h>
39209
39210 static int min_read_thresh = 8, min_write_thresh;
39211-static int max_read_thresh = INPUT_POOL_WORDS * 32;
39212+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
39213 static int max_write_thresh = INPUT_POOL_WORDS * 32;
39214 static char sysctl_bootid[16];
39215
39216@@ -1523,7 +1534,7 @@ static char sysctl_bootid[16];
39217 static int proc_do_uuid(struct ctl_table *table, int write,
39218 void __user *buffer, size_t *lenp, loff_t *ppos)
39219 {
39220- struct ctl_table fake_table;
39221+ ctl_table_no_const fake_table;
39222 unsigned char buf[64], tmp_uuid[16], *uuid;
39223
39224 uuid = table->data;
39225@@ -1553,7 +1564,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39226 static int proc_do_entropy(ctl_table *table, int write,
39227 void __user *buffer, size_t *lenp, loff_t *ppos)
39228 {
39229- ctl_table fake_table;
39230+ ctl_table_no_const fake_table;
39231 int entropy_count;
39232
39233 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
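Enlarging the pools under GRKERNSEC_RANDNET is what forces the later credit_entropy_bits() change: the deleted comment required 2*(ENTROPY_SHIFT + log2(poolbits)) <= 31 so that (pool_size - entropy_count) * anfrac * 3 stays within 32 bits, and a 16384-bit input pool yields 2*(3 + 14) = 34. Declaring anfrac as u64 promotes the whole product to 64-bit arithmetic. (The __latent_entropy attribute on the pool arrays, provided by the PaX gcc plugin of the same name, seeds them with build- and boot-time entropy instead of zeroes.) Checking the multiply in a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int pool_frac = 16384u << 3;   /* pool size in 1/8-bit units */
	unsigned int entropy   = 0;             /* empty pool: worst case */
	unsigned int anfrac32  = pool_frac / 2; /* min(pnfrac, pool_size/2) */
	uint64_t     anfrac64  = anfrac32;

	unsigned int add32 = (pool_frac - entropy) * anfrac32 * 3;  /* wraps */
	uint64_t     add64 = (pool_frac - entropy) * anfrac64 * 3;

	printf("32-bit product: %u\n", add32);                  /* 0 */
	printf("64-bit product: %llu\n", (unsigned long long)add64);
	return 0;
}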
39234diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39235index 7cc1fe22..b602d6b 100644
39236--- a/drivers/char/sonypi.c
39237+++ b/drivers/char/sonypi.c
39238@@ -54,6 +54,7 @@
39239
39240 #include <asm/uaccess.h>
39241 #include <asm/io.h>
39242+#include <asm/local.h>
39243
39244 #include <linux/sonypi.h>
39245
39246@@ -490,7 +491,7 @@ static struct sonypi_device {
39247 spinlock_t fifo_lock;
39248 wait_queue_head_t fifo_proc_list;
39249 struct fasync_struct *fifo_async;
39250- int open_count;
39251+ local_t open_count;
39252 int model;
39253 struct input_dev *input_jog_dev;
39254 struct input_dev *input_key_dev;
39255@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39256 static int sonypi_misc_release(struct inode *inode, struct file *file)
39257 {
39258 mutex_lock(&sonypi_device.lock);
39259- sonypi_device.open_count--;
39260+ local_dec(&sonypi_device.open_count);
39261 mutex_unlock(&sonypi_device.lock);
39262 return 0;
39263 }
39264@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39265 {
39266 mutex_lock(&sonypi_device.lock);
39267 /* Flush input queue on first open */
39268- if (!sonypi_device.open_count)
39269+ if (!local_read(&sonypi_device.open_count))
39270 kfifo_reset(&sonypi_device.fifo);
39271- sonypi_device.open_count++;
39272+ local_inc(&sonypi_device.open_count);
39273 mutex_unlock(&sonypi_device.lock);
39274
39275 return 0;
39276diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39277index b9a57fa..5bb9e38 100644
39278--- a/drivers/char/tpm/tpm_acpi.c
39279+++ b/drivers/char/tpm/tpm_acpi.c
39280@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39281 virt = acpi_os_map_memory(start, len);
39282 if (!virt) {
39283 kfree(log->bios_event_log);
39284+ log->bios_event_log = NULL;
39285 printk("%s: ERROR - Unable to map memory\n", __func__);
39286 return -EIO;
39287 }
39288
39289- memcpy_fromio(log->bios_event_log, virt, len);
39290+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39291
39292 acpi_os_unmap_memory(virt, len);
39293 return 0;
39294diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39295index 59f7cb2..bac8b6d 100644
39296--- a/drivers/char/tpm/tpm_eventlog.c
39297+++ b/drivers/char/tpm/tpm_eventlog.c
39298@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39299 event = addr;
39300
39301 if ((event->event_type == 0 && event->event_size == 0) ||
39302- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39303+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39304 return NULL;
39305
39306 return addr;
39307@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39308 return NULL;
39309
39310 if ((event->event_type == 0 && event->event_size == 0) ||
39311- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39312+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39313 return NULL;
39314
39315 (*pos)++;
39316@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39317 int i;
39318
39319 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39320- seq_putc(m, data[i]);
39321+ if (!seq_putc(m, data[i]))
39322+ return -EFAULT;
39323
39324 return 0;
39325 }
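Both tpm_eventlog.c bounds checks are rewritten so the attacker-influenced quantity (event_size, read straight from the log) stands alone on the left: in the original form, addr + sizeof(struct tcpa_event) + event_size can wrap past the top of the address space and compare as a small value, waving an out-of-bounds event through. The rewritten form subtracts on the known-good side instead (it presumes the header itself fits below limit, which the callers establish). Demonstrated with unsigned integers, whose wraparound is well defined in C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t addr = 0x1000, limit = 0x2000, hdr = 32;
	/* chosen so that addr + hdr + evil wraps around to exactly 0 */
	uintptr_t evil = UINTPTR_MAX - addr - hdr + 1;

	int naive = (addr + hdr + evil >= limit);   /* 0: bound bypassed */
	int fixed = (evil >= limit - addr - hdr);   /* 1: bound holds   */

	printf("naive check rejects: %d\n", naive);
	printf("fixed check rejects: %d\n", fixed);
	return 0;
}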
39326diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39327index 6928d09..ff6abe8 100644
39328--- a/drivers/char/virtio_console.c
39329+++ b/drivers/char/virtio_console.c
39330@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
39331 if (to_user) {
39332 ssize_t ret;
39333
39334- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39335+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39336 if (ret)
39337 return -EFAULT;
39338 } else {
39339@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39340 if (!port_has_data(port) && !port->host_connected)
39341 return 0;
39342
39343- return fill_readbuf(port, ubuf, count, true);
39344+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39345 }
39346
39347 static int wait_port_writable(struct port *port, bool nonblock)
39348diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39349index 57a078e..c17cde8 100644
39350--- a/drivers/clk/clk-composite.c
39351+++ b/drivers/clk/clk-composite.c
39352@@ -146,7 +146,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39353 struct clk *clk;
39354 struct clk_init_data init;
39355 struct clk_composite *composite;
39356- struct clk_ops *clk_composite_ops;
39357+ clk_ops_no_const *clk_composite_ops;
39358
39359 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39360 if (!composite) {
39361diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
39362index 5983a26..65d5f46 100644
39363--- a/drivers/clk/socfpga/clk.c
39364+++ b/drivers/clk/socfpga/clk.c
39365@@ -22,6 +22,7 @@
39366 #include <linux/clk-provider.h>
39367 #include <linux/io.h>
39368 #include <linux/of.h>
39369+#include <asm/pgtable.h>
39370
39371 /* Clock Manager offsets */
39372 #define CLKMGR_CTRL 0x0
39373@@ -150,8 +151,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
39374 streq(clk_name, "periph_pll") ||
39375 streq(clk_name, "sdram_pll")) {
39376 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39377- clk_pll_ops.enable = clk_gate_ops.enable;
39378- clk_pll_ops.disable = clk_gate_ops.disable;
39379+ pax_open_kernel();
39380+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39381+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39382+ pax_close_kernel();
39383 }
39384
39385 clk = clk_register(NULL, &socfpga_clk->hw.hw);
39386@@ -242,7 +245,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
39387 return parent_rate / div;
39388 }
39389
39390-static struct clk_ops gateclk_ops = {
39391+static clk_ops_no_const gateclk_ops __read_only = {
39392 .recalc_rate = socfpga_clk_recalc_rate,
39393 .get_parent = socfpga_clk_get_parent,
39394 .set_parent = socfpga_clk_set_parent,
39395diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39396index 18448a7..d5fad43 100644
39397--- a/drivers/cpufreq/acpi-cpufreq.c
39398+++ b/drivers/cpufreq/acpi-cpufreq.c
39399@@ -676,8 +676,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39400 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39401 per_cpu(acfreq_data, cpu) = data;
39402
39403- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39404- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39405+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39406+ pax_open_kernel();
39407+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39408+ pax_close_kernel();
39409+ }
39410
39411 result = acpi_processor_register_performance(data->acpi_data, cpu);
39412 if (result)
39413@@ -810,7 +813,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39414 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39415 break;
39416 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39417- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39418+ pax_open_kernel();
39419+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39420+ pax_close_kernel();
39421 break;
39422 default:
39423 break;
39424@@ -905,8 +910,10 @@ static void __init acpi_cpufreq_boost_init(void)
39425 if (!msrs)
39426 return;
39427
39428- acpi_cpufreq_driver.boost_supported = true;
39429- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39430+ pax_open_kernel();
39431+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39432+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39433+ pax_close_kernel();
39434 get_online_cpus();
39435
39436 /* Force all MSRs to the same value */
39437diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39438index 199b52b..e3503bb 100644
39439--- a/drivers/cpufreq/cpufreq.c
39440+++ b/drivers/cpufreq/cpufreq.c
39441@@ -1970,7 +1970,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39442 #endif
39443
39444 mutex_lock(&cpufreq_governor_mutex);
39445- list_del(&governor->governor_list);
39446+ pax_list_del(&governor->governor_list);
39447 mutex_unlock(&cpufreq_governor_mutex);
39448 return;
39449 }
39450@@ -2200,7 +2200,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39451 return NOTIFY_OK;
39452 }
39453
39454-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39455+static struct notifier_block cpufreq_cpu_notifier = {
39456 .notifier_call = cpufreq_cpu_callback,
39457 };
39458
39459@@ -2240,13 +2240,17 @@ int cpufreq_boost_trigger_state(int state)
39460 return 0;
39461
39462 write_lock_irqsave(&cpufreq_driver_lock, flags);
39463- cpufreq_driver->boost_enabled = state;
39464+ pax_open_kernel();
39465+ *(bool *)&cpufreq_driver->boost_enabled = state;
39466+ pax_close_kernel();
39467 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39468
39469 ret = cpufreq_driver->set_boost(state);
39470 if (ret) {
39471 write_lock_irqsave(&cpufreq_driver_lock, flags);
39472- cpufreq_driver->boost_enabled = !state;
39473+ pax_open_kernel();
39474+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39475+ pax_close_kernel();
39476 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39477
39478 pr_err("%s: Cannot %s BOOST\n", __func__,
39479@@ -2300,8 +2304,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39480
39481 pr_debug("trying to register driver %s\n", driver_data->name);
39482
39483- if (driver_data->setpolicy)
39484- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39485+ if (driver_data->setpolicy) {
39486+ pax_open_kernel();
39487+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39488+ pax_close_kernel();
39489+ }
39490
39491 write_lock_irqsave(&cpufreq_driver_lock, flags);
39492 if (cpufreq_driver) {
39493@@ -2316,8 +2323,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39494 * Check if driver provides function to enable boost -
39495 * if not, use cpufreq_boost_set_sw as default
39496 */
39497- if (!cpufreq_driver->set_boost)
39498- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39499+ if (!cpufreq_driver->set_boost) {
39500+ pax_open_kernel();
39501+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39502+ pax_close_kernel();
39503+ }
39504
39505 ret = cpufreq_sysfs_create_file(&boost.attr);
39506 if (ret) {
39507diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39508index ba43991..23858ffb 100644
39509--- a/drivers/cpufreq/cpufreq_governor.c
39510+++ b/drivers/cpufreq/cpufreq_governor.c
39511@@ -191,7 +191,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39512 struct dbs_data *dbs_data;
39513 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39514 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39515- struct od_ops *od_ops = NULL;
39516+ const struct od_ops *od_ops = NULL;
39517 struct od_dbs_tuners *od_tuners = NULL;
39518 struct cs_dbs_tuners *cs_tuners = NULL;
39519 struct cpu_dbs_common_info *cpu_cdbs;
39520@@ -257,7 +257,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39521
39522 if ((cdata->governor == GOV_CONSERVATIVE) &&
39523 (!policy->governor->initialized)) {
39524- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39525+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39526
39527 cpufreq_register_notifier(cs_ops->notifier_block,
39528 CPUFREQ_TRANSITION_NOTIFIER);
39529@@ -277,7 +277,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39530
39531 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39532 (policy->governor->initialized == 1)) {
39533- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39534+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39535
39536 cpufreq_unregister_notifier(cs_ops->notifier_block,
39537 CPUFREQ_TRANSITION_NOTIFIER);
39538diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39539index bfb9ae1..e1d3a7e 100644
39540--- a/drivers/cpufreq/cpufreq_governor.h
39541+++ b/drivers/cpufreq/cpufreq_governor.h
39542@@ -205,7 +205,7 @@ struct common_dbs_data {
39543 void (*exit)(struct dbs_data *dbs_data);
39544
39545 /* Governor specific ops, see below */
39546- void *gov_ops;
39547+ const void *gov_ops;
39548 };
39549
39550 /* Governor Per policy data */
39551@@ -225,7 +225,7 @@ struct od_ops {
39552 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39553 unsigned int freq_next, unsigned int relation);
39554 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39555-};
39556+} __no_const;
39557
39558 struct cs_ops {
39559 struct notifier_block *notifier_block;
39560diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39561index 18d4091..434be15 100644
39562--- a/drivers/cpufreq/cpufreq_ondemand.c
39563+++ b/drivers/cpufreq/cpufreq_ondemand.c
39564@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
39565
39566 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39567
39568-static struct od_ops od_ops = {
39569+static struct od_ops od_ops __read_only = {
39570 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39571 .powersave_bias_target = generic_powersave_bias_target,
39572 .freq_increase = dbs_freq_increase,
39573@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39574 (struct cpufreq_policy *, unsigned int, unsigned int),
39575 unsigned int powersave_bias)
39576 {
39577- od_ops.powersave_bias_target = f;
39578+ pax_open_kernel();
39579+ *(void **)&od_ops.powersave_bias_target = f;
39580+ pax_close_kernel();
39581 od_set_powersave_bias(powersave_bias);
39582 }
39583 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39584
39585 void od_unregister_powersave_bias_handler(void)
39586 {
39587- od_ops.powersave_bias_target = generic_powersave_bias_target;
39588+ pax_open_kernel();
39589+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39590+ pax_close_kernel();
39591 od_set_powersave_bias(0);
39592 }
39593 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39594diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39595index 9ac3783..652b033 100644
39596--- a/drivers/cpufreq/intel_pstate.c
39597+++ b/drivers/cpufreq/intel_pstate.c
39598@@ -126,10 +126,10 @@ struct pstate_funcs {
39599 struct cpu_defaults {
39600 struct pstate_adjust_policy pid_policy;
39601 struct pstate_funcs funcs;
39602-};
39603+} __do_const;
39604
39605 static struct pstate_adjust_policy pid_params;
39606-static struct pstate_funcs pstate_funcs;
39607+static struct pstate_funcs *pstate_funcs;
39608
39609 struct perf_limits {
39610 int no_turbo;
39611@@ -527,7 +527,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39612
39613 cpu->pstate.current_pstate = pstate;
39614
39615- pstate_funcs.set(cpu, pstate);
39616+ pstate_funcs->set(cpu, pstate);
39617 }
39618
39619 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
39620@@ -549,12 +549,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39621 {
39622 sprintf(cpu->name, "Intel 2nd generation core");
39623
39624- cpu->pstate.min_pstate = pstate_funcs.get_min();
39625- cpu->pstate.max_pstate = pstate_funcs.get_max();
39626- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39627+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39628+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39629+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39630
39631- if (pstate_funcs.get_vid)
39632- pstate_funcs.get_vid(cpu);
39633+ if (pstate_funcs->get_vid)
39634+ pstate_funcs->get_vid(cpu);
39635 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39636 }
39637
39638@@ -830,9 +830,9 @@ static int intel_pstate_msrs_not_valid(void)
39639 rdmsrl(MSR_IA32_APERF, aperf);
39640 rdmsrl(MSR_IA32_MPERF, mperf);
39641
39642- if (!pstate_funcs.get_max() ||
39643- !pstate_funcs.get_min() ||
39644- !pstate_funcs.get_turbo())
39645+ if (!pstate_funcs->get_max() ||
39646+ !pstate_funcs->get_min() ||
39647+ !pstate_funcs->get_turbo())
39648 return -ENODEV;
39649
39650 rdmsrl(MSR_IA32_APERF, tmp);
39651@@ -846,7 +846,7 @@ static int intel_pstate_msrs_not_valid(void)
39652 return 0;
39653 }
39654
39655-static void copy_pid_params(struct pstate_adjust_policy *policy)
39656+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39657 {
39658 pid_params.sample_rate_ms = policy->sample_rate_ms;
39659 pid_params.p_gain_pct = policy->p_gain_pct;
39660@@ -858,11 +858,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39661
39662 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39663 {
39664- pstate_funcs.get_max = funcs->get_max;
39665- pstate_funcs.get_min = funcs->get_min;
39666- pstate_funcs.get_turbo = funcs->get_turbo;
39667- pstate_funcs.set = funcs->set;
39668- pstate_funcs.get_vid = funcs->get_vid;
39669+ pstate_funcs = funcs;
39670 }
39671
39672 #if IS_ENABLED(CONFIG_ACPI)
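The intel_pstate rework reaches constification by a different route: instead of copying five function pointers member-by-member into a writable static struct, copy_cpu_funcs() now stores a single pointer to the const, per-CPU-family table chosen at init (with __do_const making the tables themselves read-only). One data pointer is a much smaller corruption target than five resident function pointers. A userspace sketch with illustrative names:

#include <stdio.h>

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
};

static int core_get_max(void) { return 35; }
static int core_get_min(void) { return 10; }

/* per-family table: const, so it can live in a read-only section */
static const struct pstate_funcs core_funcs = {
	.get_max = core_get_max,
	.get_min = core_get_min,
};

/* before: a writable struct filled by memberwise copies
 * after:  one pointer selected once at init                */
static const struct pstate_funcs *funcs;

int main(void)
{
	funcs = &core_funcs;   /* analog of copy_cpu_funcs() */
	printf("max=%d min=%d\n", funcs->get_max(), funcs->get_min());
	return 0;
}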
39673diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39674index 3d1cba9..0ab21d2 100644
39675--- a/drivers/cpufreq/p4-clockmod.c
39676+++ b/drivers/cpufreq/p4-clockmod.c
39677@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39678 case 0x0F: /* Core Duo */
39679 case 0x16: /* Celeron Core */
39680 case 0x1C: /* Atom */
39681- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39682+ pax_open_kernel();
39683+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39684+ pax_close_kernel();
39685 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39686 case 0x0D: /* Pentium M (Dothan) */
39687- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39688+ pax_open_kernel();
39689+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39690+ pax_close_kernel();
39691 /* fall through */
39692 case 0x09: /* Pentium M (Banias) */
39693 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39694@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39695
39696 /* on P-4s, the TSC runs with constant frequency independent whether
39697 * throttling is active or not. */
39698- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39699+ pax_open_kernel();
39700+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39701+ pax_close_kernel();
39702
39703 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39704 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39705diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39706index 724ffbd..f06aaaa 100644
39707--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39708+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39709@@ -18,14 +18,12 @@
39710 #include <asm/head.h>
39711 #include <asm/timer.h>
39712
39713-static struct cpufreq_driver *cpufreq_us3_driver;
39714-
39715 struct us3_freq_percpu_info {
39716 struct cpufreq_frequency_table table[4];
39717 };
39718
39719 /* Indexed by cpu number. */
39720-static struct us3_freq_percpu_info *us3_freq_table;
39721+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39722
39723 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39724 * in the Safari config register.
39725@@ -156,18 +154,28 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39726
39727 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39728 {
39729- if (cpufreq_us3_driver) {
39730- cpufreq_frequency_table_put_attr(policy->cpu);
39731- us3_freq_target(policy, 0);
39732- }
39733+ cpufreq_frequency_table_put_attr(policy->cpu);
39734+ us3_freq_target(policy, 0);
39735
39736 return 0;
39737 }
39738
39739+static int __init us3_freq_init(void);
39740+static void __exit us3_freq_exit(void);
39741+
39742+static struct cpufreq_driver cpufreq_us3_driver = {
39743+ .init = us3_freq_cpu_init,
39744+ .verify = cpufreq_generic_frequency_table_verify,
39745+ .target_index = us3_freq_target,
39746+ .get = us3_freq_get,
39747+ .exit = us3_freq_cpu_exit,
39748+ .name = "UltraSPARC-III",
39749+
39750+};
39751+
39752 static int __init us3_freq_init(void)
39753 {
39754 unsigned long manuf, impl, ver;
39755- int ret;
39756
39757 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39758 return -ENODEV;
39759@@ -180,55 +188,15 @@ static int __init us3_freq_init(void)
39760 (impl == CHEETAH_IMPL ||
39761 impl == CHEETAH_PLUS_IMPL ||
39762 impl == JAGUAR_IMPL ||
39763- impl == PANTHER_IMPL)) {
39764- struct cpufreq_driver *driver;
39765-
39766- ret = -ENOMEM;
39767- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39768- if (!driver)
39769- goto err_out;
39770-
39771- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39772- GFP_KERNEL);
39773- if (!us3_freq_table)
39774- goto err_out;
39775-
39776- driver->init = us3_freq_cpu_init;
39777- driver->verify = cpufreq_generic_frequency_table_verify;
39778- driver->target_index = us3_freq_target;
39779- driver->get = us3_freq_get;
39780- driver->exit = us3_freq_cpu_exit;
39781- strcpy(driver->name, "UltraSPARC-III");
39782-
39783- cpufreq_us3_driver = driver;
39784- ret = cpufreq_register_driver(driver);
39785- if (ret)
39786- goto err_out;
39787-
39788- return 0;
39789-
39790-err_out:
39791- if (driver) {
39792- kfree(driver);
39793- cpufreq_us3_driver = NULL;
39794- }
39795- kfree(us3_freq_table);
39796- us3_freq_table = NULL;
39797- return ret;
39798- }
39799+ impl == PANTHER_IMPL))
39800+ return cpufreq_register_driver(&cpufreq_us3_driver);
39801
39802 return -ENODEV;
39803 }
39804
39805 static void __exit us3_freq_exit(void)
39806 {
39807- if (cpufreq_us3_driver) {
39808- cpufreq_unregister_driver(cpufreq_us3_driver);
39809- kfree(cpufreq_us3_driver);
39810- cpufreq_us3_driver = NULL;
39811- kfree(us3_freq_table);
39812- us3_freq_table = NULL;
39813- }
39814+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39815 }
39816
39817 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39818diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39819index 4e1daca..e707b61 100644
39820--- a/drivers/cpufreq/speedstep-centrino.c
39821+++ b/drivers/cpufreq/speedstep-centrino.c
39822@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39823 !cpu_has(cpu, X86_FEATURE_EST))
39824 return -ENODEV;
39825
39826- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39827- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39828+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39829+ pax_open_kernel();
39830+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39831+ pax_close_kernel();
39832+ }
39833
39834 if (policy->cpu != 0)
39835 return -ENODEV;
39836diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39837index 06dbe7c..c2c8671 100644
39838--- a/drivers/cpuidle/driver.c
39839+++ b/drivers/cpuidle/driver.c
39840@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev,
39841
39842 static void poll_idle_init(struct cpuidle_driver *drv)
39843 {
39844- struct cpuidle_state *state = &drv->states[0];
39845+ cpuidle_state_no_const *state = &drv->states[0];
39846
39847 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39848 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39849diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39850index ca89412..a7b9c49 100644
39851--- a/drivers/cpuidle/governor.c
39852+++ b/drivers/cpuidle/governor.c
39853@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39854 mutex_lock(&cpuidle_lock);
39855 if (__cpuidle_find_governor(gov->name) == NULL) {
39856 ret = 0;
39857- list_add_tail(&gov->governor_list, &cpuidle_governors);
39858+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39859 if (!cpuidle_curr_governor ||
39860 cpuidle_curr_governor->rating < gov->rating)
39861 cpuidle_switch_governor(gov);
39862diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39863index e918b6d..f87ea80 100644
39864--- a/drivers/cpuidle/sysfs.c
39865+++ b/drivers/cpuidle/sysfs.c
39866@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39867 NULL
39868 };
39869
39870-static struct attribute_group cpuidle_attr_group = {
39871+static attribute_group_no_const cpuidle_attr_group = {
39872 .attrs = cpuidle_default_attrs,
39873 .name = "cpuidle",
39874 };
39875diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39876index 12fea3e..1e28f47 100644
39877--- a/drivers/crypto/hifn_795x.c
39878+++ b/drivers/crypto/hifn_795x.c
39879@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39880 MODULE_PARM_DESC(hifn_pll_ref,
39881 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39882
39883-static atomic_t hifn_dev_number;
39884+static atomic_unchecked_t hifn_dev_number;
39885
39886 #define ACRYPTO_OP_DECRYPT 0
39887 #define ACRYPTO_OP_ENCRYPT 1
39888@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39889 goto err_out_disable_pci_device;
39890
39891 snprintf(name, sizeof(name), "hifn%d",
39892- atomic_inc_return(&hifn_dev_number)-1);
39893+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39894
39895 err = pci_request_regions(pdev, name);
39896 if (err)
39897diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39898index a0b2f7e..1b6f028 100644
39899--- a/drivers/devfreq/devfreq.c
39900+++ b/drivers/devfreq/devfreq.c
39901@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39902 goto err_out;
39903 }
39904
39905- list_add(&governor->node, &devfreq_governor_list);
39906+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39907
39908 list_for_each_entry(devfreq, &devfreq_list, node) {
39909 int ret = 0;
39910@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39911 }
39912 }
39913
39914- list_del(&governor->node);
39915+ pax_list_del((struct list_head *)&governor->node);
39916 err_out:
39917 mutex_unlock(&devfreq_list_lock);
39918
39919diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39920index 2e7b394..1371a64 100644
39921--- a/drivers/dma/sh/shdma-base.c
39922+++ b/drivers/dma/sh/shdma-base.c
39923@@ -267,8 +267,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39924 schan->slave_id = -EINVAL;
39925 }
39926
39927- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39928- sdev->desc_size, GFP_KERNEL);
39929+ schan->desc = kcalloc(sdev->desc_size,
39930+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39931 if (!schan->desc) {
39932 ret = -ENOMEM;
39933 goto edescalloc;
39934diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39935index 0d765c0..60b7480 100644
39936--- a/drivers/dma/sh/shdmac.c
39937+++ b/drivers/dma/sh/shdmac.c
39938@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39939 return ret;
39940 }
39941
39942-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39943+static struct notifier_block sh_dmae_nmi_notifier = {
39944 .notifier_call = sh_dmae_nmi_handler,
39945
39946 /* Run before NMI debug handler and KGDB */
39947diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39948index 592af5f..bb1d583 100644
39949--- a/drivers/edac/edac_device.c
39950+++ b/drivers/edac/edac_device.c
39951@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39952 */
39953 int edac_device_alloc_index(void)
39954 {
39955- static atomic_t device_indexes = ATOMIC_INIT(0);
39956+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39957
39958- return atomic_inc_return(&device_indexes) - 1;
39959+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39960 }
39961 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39962
39963diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39964index b335c6a..db65b44 100644
39965--- a/drivers/edac/edac_mc_sysfs.c
39966+++ b/drivers/edac/edac_mc_sysfs.c
39967@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
39968 struct dev_ch_attribute {
39969 struct device_attribute attr;
39970 int channel;
39971-};
39972+} __do_const;
39973
39974 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39975 struct dev_ch_attribute dev_attr_legacy_##_name = \
39976@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39977 }
39978
39979 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39980+ pax_open_kernel();
39981 if (mci->get_sdram_scrub_rate) {
39982- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39983- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39984+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39985+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39986 }
39987 if (mci->set_sdram_scrub_rate) {
39988- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39989- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39990+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39991+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39992 }
39993+ pax_close_kernel();
39994 err = device_create_file(&mci->dev,
39995 &dev_attr_sdram_scrub_rate);
39996 if (err) {
39997diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39998index 2cf44b4d..6dd2dc7 100644
39999--- a/drivers/edac/edac_pci.c
40000+++ b/drivers/edac/edac_pci.c
40001@@ -29,7 +29,7 @@
40002
40003 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40004 static LIST_HEAD(edac_pci_list);
40005-static atomic_t pci_indexes = ATOMIC_INIT(0);
40006+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40007
40008 /*
40009 * edac_pci_alloc_ctl_info
40010@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40011 */
40012 int edac_pci_alloc_index(void)
40013 {
40014- return atomic_inc_return(&pci_indexes) - 1;
40015+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40016 }
40017 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40018
40019diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40020index e8658e4..22746d6 100644
40021--- a/drivers/edac/edac_pci_sysfs.c
40022+++ b/drivers/edac/edac_pci_sysfs.c
40023@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40024 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40025 static int edac_pci_poll_msec = 1000; /* one second workq period */
40026
40027-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40028-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40029+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40030+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40031
40032 static struct kobject *edac_pci_top_main_kobj;
40033 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40034@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
40035 void *value;
40036 ssize_t(*show) (void *, char *);
40037 ssize_t(*store) (void *, const char *, size_t);
40038-};
40039+} __do_const;
40040
40041 /* Set of show/store abstract level functions for PCI Parity object */
40042 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40043@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40044 edac_printk(KERN_CRIT, EDAC_PCI,
40045 "Signaled System Error on %s\n",
40046 pci_name(dev));
40047- atomic_inc(&pci_nonparity_count);
40048+ atomic_inc_unchecked(&pci_nonparity_count);
40049 }
40050
40051 if (status & (PCI_STATUS_PARITY)) {
40052@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40053 "Master Data Parity Error on %s\n",
40054 pci_name(dev));
40055
40056- atomic_inc(&pci_parity_count);
40057+ atomic_inc_unchecked(&pci_parity_count);
40058 }
40059
40060 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40061@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40062 "Detected Parity Error on %s\n",
40063 pci_name(dev));
40064
40065- atomic_inc(&pci_parity_count);
40066+ atomic_inc_unchecked(&pci_parity_count);
40067 }
40068 }
40069
40070@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40071 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40072 "Signaled System Error on %s\n",
40073 pci_name(dev));
40074- atomic_inc(&pci_nonparity_count);
40075+ atomic_inc_unchecked(&pci_nonparity_count);
40076 }
40077
40078 if (status & (PCI_STATUS_PARITY)) {
40079@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40080 "Master Data Parity Error on "
40081 "%s\n", pci_name(dev));
40082
40083- atomic_inc(&pci_parity_count);
40084+ atomic_inc_unchecked(&pci_parity_count);
40085 }
40086
40087 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40088@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40089 "Detected Parity Error on %s\n",
40090 pci_name(dev));
40091
40092- atomic_inc(&pci_parity_count);
40093+ atomic_inc_unchecked(&pci_parity_count);
40094 }
40095 }
40096 }
40097@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
40098 if (!check_pci_errors)
40099 return;
40100
40101- before_count = atomic_read(&pci_parity_count);
40102+ before_count = atomic_read_unchecked(&pci_parity_count);
40103
40104 /* scan all PCI devices looking for a Parity Error on devices and
40105 * bridges.
40106@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
40107 /* Only if operator has selected panic on PCI Error */
40108 if (edac_pci_get_panic_on_pe()) {
40109 /* If the count is different 'after' from 'before' */
40110- if (before_count != atomic_read(&pci_parity_count))
40111+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40112 panic("EDAC: PCI Parity Error");
40113 }
40114 }
40115diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40116index 51b7e3a..aa8a3e8 100644
40117--- a/drivers/edac/mce_amd.h
40118+++ b/drivers/edac/mce_amd.h
40119@@ -77,7 +77,7 @@ struct amd_decoder_ops {
40120 bool (*mc0_mce)(u16, u8);
40121 bool (*mc1_mce)(u16, u8);
40122 bool (*mc2_mce)(u16, u8);
40123-};
40124+} __no_const;
40125
40126 void amd_report_gart_errors(bool);
40127 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40128diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40129index 57ea7f4..af06b76 100644
40130--- a/drivers/firewire/core-card.c
40131+++ b/drivers/firewire/core-card.c
40132@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40133 const struct fw_card_driver *driver,
40134 struct device *device)
40135 {
40136- static atomic_t index = ATOMIC_INIT(-1);
40137+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40138
40139- card->index = atomic_inc_return(&index);
40140+ card->index = atomic_inc_return_unchecked(&index);
40141 card->driver = driver;
40142 card->device = device;
40143 card->current_tlabel = 0;
40144@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40145
40146 void fw_core_remove_card(struct fw_card *card)
40147 {
40148- struct fw_card_driver dummy_driver = dummy_driver_template;
40149+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40150
40151 card->driver->update_phy_reg(card, 4,
40152 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40153diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40154index 2c6d5e1..a2cca6b 100644
40155--- a/drivers/firewire/core-device.c
40156+++ b/drivers/firewire/core-device.c
40157@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40158 struct config_rom_attribute {
40159 struct device_attribute attr;
40160 u32 key;
40161-};
40162+} __do_const;
40163
40164 static ssize_t show_immediate(struct device *dev,
40165 struct device_attribute *dattr, char *buf)
40166diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40167index eb6935c..3cc2bfa 100644
40168--- a/drivers/firewire/core-transaction.c
40169+++ b/drivers/firewire/core-transaction.c
40170@@ -38,6 +38,7 @@
40171 #include <linux/timer.h>
40172 #include <linux/types.h>
40173 #include <linux/workqueue.h>
40174+#include <linux/sched.h>
40175
40176 #include <asm/byteorder.h>
40177
40178diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40179index c98764a..551b520 100644
40180--- a/drivers/firewire/core.h
40181+++ b/drivers/firewire/core.h
40182@@ -111,6 +111,7 @@ struct fw_card_driver {
40183
40184 int (*stop_iso)(struct fw_iso_context *ctx);
40185 };
40186+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40187
40188 void fw_card_initialize(struct fw_card *card,
40189 const struct fw_card_driver *driver, struct device *device);
40190diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40191index 8db6632..9bbc8ca 100644
40192--- a/drivers/firewire/ohci.c
40193+++ b/drivers/firewire/ohci.c
40194@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40195 be32_to_cpu(ohci->next_header));
40196 }
40197
40198+#ifndef CONFIG_GRKERNSEC
40199 if (param_remote_dma) {
40200 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40201 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40202 }
40203+#endif
40204
40205 spin_unlock_irq(&ohci->lock);
40206
40207@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40208 unsigned long flags;
40209 int n, ret = 0;
40210
40211+#ifndef CONFIG_GRKERNSEC
40212 if (param_remote_dma)
40213 return 0;
40214+#endif
40215
40216 /*
40217 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40218diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40219index 94a58a0..f5eba42 100644
40220--- a/drivers/firmware/dmi-id.c
40221+++ b/drivers/firmware/dmi-id.c
40222@@ -16,7 +16,7 @@
40223 struct dmi_device_attribute{
40224 struct device_attribute dev_attr;
40225 int field;
40226-};
40227+} __do_const;
40228 #define to_dmi_dev_attr(_dev_attr) \
40229 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40230
40231diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40232index 17afc51..0ef90cd 100644
40233--- a/drivers/firmware/dmi_scan.c
40234+++ b/drivers/firmware/dmi_scan.c
40235@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40236 if (buf == NULL)
40237 return -1;
40238
40239- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40240+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40241
40242 dmi_unmap(buf);
40243 return 0;
40244diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40245index 1491dd4..aa910db 100644
40246--- a/drivers/firmware/efi/cper.c
40247+++ b/drivers/firmware/efi/cper.c
40248@@ -41,12 +41,12 @@
40249 */
40250 u64 cper_next_record_id(void)
40251 {
40252- static atomic64_t seq;
40253+ static atomic64_unchecked_t seq;
40254
40255- if (!atomic64_read(&seq))
40256- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40257+ if (!atomic64_read_unchecked(&seq))
40258+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40259
40260- return atomic64_inc_return(&seq);
40261+ return atomic64_inc_return_unchecked(&seq);
40262 }
40263 EXPORT_SYMBOL_GPL(cper_next_record_id);
40264
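
cper's record id is a sequence number that is allowed to wrap, so it moves to the _unchecked atomic variants: under CONFIG_PAX_REFCOUNT the plain atomic ops trap on signed overflow to stop refcount-overflow exploits, and counters with intentional wraparound must opt out. A sketch of the relationship, assuming the unchecked type reduces to a plain wrapper when the feature is off:

#include <linux/atomic.h>

typedef struct { atomic64_t a; } atomic64_unchecked_t;

static inline u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
{
        return atomic64_inc_return(&v->a);   /* wraps silently, no trap */
}
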
40265diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40266index 4753bac..02861a2 100644
40267--- a/drivers/firmware/efi/efi.c
40268+++ b/drivers/firmware/efi/efi.c
40269@@ -120,14 +120,16 @@ static struct attribute_group efi_subsys_attr_group = {
40270 };
40271
40272 static struct efivars generic_efivars;
40273-static struct efivar_operations generic_ops;
40274+static efivar_operations_no_const generic_ops __read_only;
40275
40276 static int generic_ops_register(void)
40277 {
40278- generic_ops.get_variable = efi.get_variable;
40279- generic_ops.set_variable = efi.set_variable;
40280- generic_ops.get_next_variable = efi.get_next_variable;
40281- generic_ops.query_variable_store = efi_query_variable_store;
40282+ pax_open_kernel();
40283+ *(void **)&generic_ops.get_variable = efi.get_variable;
40284+ *(void **)&generic_ops.set_variable = efi.set_variable;
40285+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40286+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40287+ pax_close_kernel();
40288
40289 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40290 }
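
generic_ops becomes __read_only data that must still be filled in once at boot; pax_open_kernel()/pax_close_kernel() open a short write window (on x86 by clearing CR0.WP), and the *(void **)& casts strip the const that constification places on the function-pointer members. The one-shot pattern, with hypothetical names:

struct my_ops {                         /* hypothetical */
        void *(*handler)(void);
};

static struct my_ops ops __read_only;   /* lives in read-only memory */

static void ops_init(void *(*real_handler)(void))
{
        pax_open_kernel();
        *(void **)&ops.handler = real_handler;  /* write through WP */
        pax_close_kernel();
}
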
40291diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40292index 3dc2482..7bd2f61 100644
40293--- a/drivers/firmware/efi/efivars.c
40294+++ b/drivers/firmware/efi/efivars.c
40295@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40296 static int
40297 create_efivars_bin_attributes(void)
40298 {
40299- struct bin_attribute *attr;
40300+ bin_attribute_no_const *attr;
40301 int error;
40302
40303 /* new_var */
40304diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40305index 2a90ba6..07f3733 100644
40306--- a/drivers/firmware/google/memconsole.c
40307+++ b/drivers/firmware/google/memconsole.c
40308@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
40309 if (!found_memconsole())
40310 return -ENODEV;
40311
40312- memconsole_bin_attr.size = memconsole_length;
40313+ pax_open_kernel();
40314+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40315+ pax_close_kernel();
40316
40317 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40318
40319diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40320index 1e98a98..b444372 100644
40321--- a/drivers/gpio/gpio-em.c
40322+++ b/drivers/gpio/gpio-em.c
40323@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40324 struct em_gio_priv *p;
40325 struct resource *io[2], *irq[2];
40326 struct gpio_chip *gpio_chip;
40327- struct irq_chip *irq_chip;
40328+ irq_chip_no_const *irq_chip;
40329 const char *name = dev_name(&pdev->dev);
40330 int ret;
40331
40332diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40333index f5bf3c3..7baaa59 100644
40334--- a/drivers/gpio/gpio-ich.c
40335+++ b/drivers/gpio/gpio-ich.c
40336@@ -71,7 +71,7 @@ struct ichx_desc {
40337 /* Some chipsets have quirks, let these use their own request/get */
40338 int (*request)(struct gpio_chip *chip, unsigned offset);
40339 int (*get)(struct gpio_chip *chip, unsigned offset);
40340-};
40341+} __do_const;
40342
40343 static struct {
40344 spinlock_t lock;
40345diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40346index ca76ce7..68b384b 100644
40347--- a/drivers/gpio/gpio-rcar.c
40348+++ b/drivers/gpio/gpio-rcar.c
40349@@ -355,7 +355,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40350 struct gpio_rcar_priv *p;
40351 struct resource *io, *irq;
40352 struct gpio_chip *gpio_chip;
40353- struct irq_chip *irq_chip;
40354+ irq_chip_no_const *irq_chip;
40355 const char *name = dev_name(&pdev->dev);
40356 int ret;
40357
40358diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40359index 9902732..64b62dd 100644
40360--- a/drivers/gpio/gpio-vr41xx.c
40361+++ b/drivers/gpio/gpio-vr41xx.c
40362@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
40363 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40364 maskl, pendl, maskh, pendh);
40365
40366- atomic_inc(&irq_err_count);
40367+ atomic_inc_unchecked(&irq_err_count);
40368
40369 return -EINVAL;
40370 }
40371diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40372index 3b7d32d..05c2f74 100644
40373--- a/drivers/gpu/drm/drm_crtc.c
40374+++ b/drivers/gpu/drm/drm_crtc.c
40375@@ -3123,7 +3123,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40376 goto done;
40377 }
40378
40379- if (copy_to_user(&enum_ptr[copied].name,
40380+ if (copy_to_user(enum_ptr[copied].name,
40381 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40382 ret = -EFAULT;
40383 goto done;
40384diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40385index 345be03..158368d 100644
40386--- a/drivers/gpu/drm/drm_drv.c
40387+++ b/drivers/gpu/drm/drm_drv.c
40388@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
40389 /**
40390 * Copy and IOCTL return string to user space
40391 */
40392-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
40393+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
40394 {
40395 int len;
40396
40397@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
40398 struct drm_file *file_priv = filp->private_data;
40399 struct drm_device *dev;
40400 const struct drm_ioctl_desc *ioctl = NULL;
40401- drm_ioctl_t *func;
40402+ drm_ioctl_no_const_t func;
40403 unsigned int nr = DRM_IOCTL_NR(cmd);
40404 int retcode = -EINVAL;
40405 char stack_kdata[128];
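
The drm_drv.c and dmi_scan.c changes in this stretch are pure sparse annotations with no runtime effect: drm_copy_field() ultimately copy_to_user()s into buf, so the parameter gains __user, while dmi_walk() passes an ioremapped buffer where a kernel pointer is expected and the PaX __force_kernel cast documents the deliberate address-space crossing. What sparse checks, in miniature:

#include <linux/uaccess.h>

static int copy_name(char __user *dst, const char *src, size_t len)
{
        /* without __user on dst, `sparse` would flag this call */
        return copy_to_user(dst, src, len) ? -EFAULT : 0;
}
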
40406diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40407index 7f2af9a..1561914 100644
40408--- a/drivers/gpu/drm/drm_fops.c
40409+++ b/drivers/gpu/drm/drm_fops.c
40410@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
40411 if (drm_device_is_unplugged(dev))
40412 return -ENODEV;
40413
40414- if (!dev->open_count++)
40415+ if (local_inc_return(&dev->open_count) == 1)
40416 need_setup = 1;
40417 mutex_lock(&dev->struct_mutex);
40418 old_imapping = inode->i_mapping;
40419@@ -127,7 +127,7 @@ err_undo:
40420 iput(container_of(dev->dev_mapping, struct inode, i_data));
40421 dev->dev_mapping = old_mapping;
40422 mutex_unlock(&dev->struct_mutex);
40423- dev->open_count--;
40424+ local_dec(&dev->open_count);
40425 return retcode;
40426 }
40427 EXPORT_SYMBOL(drm_open);
40428@@ -463,7 +463,7 @@ int drm_release(struct inode *inode, struct file *filp)
40429
40430 mutex_lock(&drm_global_mutex);
40431
40432- DRM_DEBUG("open_count = %d\n", dev->open_count);
40433+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40434
40435 if (dev->driver->preclose)
40436 dev->driver->preclose(dev, file_priv);
40437@@ -472,10 +472,10 @@ int drm_release(struct inode *inode, struct file *filp)
40438 * Begin inline drm_release
40439 */
40440
40441- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40442+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40443 task_pid_nr(current),
40444 (long)old_encode_dev(file_priv->minor->device),
40445- dev->open_count);
40446+ local_read(&dev->open_count));
40447
40448 /* Release any auth tokens that might point to this file_priv,
40449 (do that under the drm_global_mutex) */
40450@@ -573,7 +573,7 @@ int drm_release(struct inode *inode, struct file *filp)
40451 * End inline drm_release
40452 */
40453
40454- if (!--dev->open_count) {
40455+ if (local_dec_and_test(&dev->open_count)) {
40456 retcode = drm_lastclose(dev);
40457 if (drm_device_is_unplugged(dev))
40458 drm_put_dev(dev);
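
open_count moves from a bare int to local_t (the drm_device field itself is converted elsewhere in this patch): local_inc_return()/local_dec_and_test() make the update-and-test a single atomic operation instead of a racy ++/-- under assorted locks, and local_read() returns long, hence the %d to %ld format changes. The open/close pairing in isolation:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static bool first_open(void)
{
        return local_inc_return(&open_count) == 1;   /* 0 -> 1 */
}

static bool last_close(void)
{
        return local_dec_and_test(&open_count);      /* 1 -> 0 */
}
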
40459diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40460index 3d2e91c..d31c4c9 100644
40461--- a/drivers/gpu/drm/drm_global.c
40462+++ b/drivers/gpu/drm/drm_global.c
40463@@ -36,7 +36,7 @@
40464 struct drm_global_item {
40465 struct mutex mutex;
40466 void *object;
40467- int refcount;
40468+ atomic_t refcount;
40469 };
40470
40471 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40472@@ -49,7 +49,7 @@ void drm_global_init(void)
40473 struct drm_global_item *item = &glob[i];
40474 mutex_init(&item->mutex);
40475 item->object = NULL;
40476- item->refcount = 0;
40477+ atomic_set(&item->refcount, 0);
40478 }
40479 }
40480
40481@@ -59,7 +59,7 @@ void drm_global_release(void)
40482 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40483 struct drm_global_item *item = &glob[i];
40484 BUG_ON(item->object != NULL);
40485- BUG_ON(item->refcount != 0);
40486+ BUG_ON(atomic_read(&item->refcount) != 0);
40487 }
40488 }
40489
40490@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40491 struct drm_global_item *item = &glob[ref->global_type];
40492
40493 mutex_lock(&item->mutex);
40494- if (item->refcount == 0) {
40495+ if (atomic_read(&item->refcount) == 0) {
40496 item->object = kzalloc(ref->size, GFP_KERNEL);
40497 if (unlikely(item->object == NULL)) {
40498 ret = -ENOMEM;
40499@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40500 goto out_err;
40501
40502 }
40503- ++item->refcount;
40504+ atomic_inc(&item->refcount);
40505 ref->object = item->object;
40506 mutex_unlock(&item->mutex);
40507 return 0;
40508@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40509 struct drm_global_item *item = &glob[ref->global_type];
40510
40511 mutex_lock(&item->mutex);
40512- BUG_ON(item->refcount == 0);
40513+ BUG_ON(atomic_read(&item->refcount) == 0);
40514 BUG_ON(ref->object != item->object);
40515- if (--item->refcount == 0) {
40516+ if (atomic_dec_and_test(&item->refcount)) {
40517 ref->release(ref);
40518 item->object = NULL;
40519 }
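
drm_global's refcount is always manipulated under item->mutex, so this conversion appears to add no new synchronization; routing the counter through atomic_t lets PAX_REFCOUNT instrument it, so a leak-driven wrap past INT_MAX traps instead of maturing into a premature free. The protected pattern in miniature:

#include <linux/atomic.h>

static atomic_t refcount = ATOMIC_INIT(0);

static void get_ref(void)
{
        atomic_inc(&refcount);          /* traps on wrap under PAX_REFCOUNT */
}

static bool put_ref(void)
{
        return atomic_dec_and_test(&refcount);  /* true on 1 -> 0 */
}
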
40520diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40521index 7473035..a48b9c5 100644
40522--- a/drivers/gpu/drm/drm_info.c
40523+++ b/drivers/gpu/drm/drm_info.c
40524@@ -75,10 +75,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40525 struct drm_local_map *map;
40526 struct drm_map_list *r_list;
40527
40528- /* Hardcoded from _DRM_FRAME_BUFFER,
40529- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40530- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40531- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40532+ static const char * const types[] = {
40533+ [_DRM_FRAME_BUFFER] = "FB",
40534+ [_DRM_REGISTERS] = "REG",
40535+ [_DRM_SHM] = "SHM",
40536+ [_DRM_AGP] = "AGP",
40537+ [_DRM_SCATTER_GATHER] = "SG",
40538+ [_DRM_CONSISTENT] = "PCI"};
40539 const char *type;
40540 int i;
40541
40542@@ -89,7 +92,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40543 map = r_list->map;
40544 if (!map)
40545 continue;
40546- if (map->type < 0 || map->type > 5)
40547+ if (map->type >= ARRAY_SIZE(types))
40548 type = "??";
40549 else
40550 type = types[map->type];
40551@@ -261,7 +264,11 @@ int drm_vma_info(struct seq_file *m, void *data)
40552 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
40553 vma->vm_flags & VM_LOCKED ? 'l' : '-',
40554 vma->vm_flags & VM_IO ? 'i' : '-',
40555+#ifdef CONFIG_GRKERNSEC_HIDESYM
40556+ 0);
40557+#else
40558 vma->vm_pgoff);
40559+#endif
40560
40561 #if defined(__i386__)
40562 pgprot = pgprot_val(vma->vm_page_prot);
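
The drm_vm_info() rework above replaces positional strings plus a hard-coded `type > 5` test with designated initializers keyed by the _DRM_* enum and an ARRAY_SIZE() bound; because the comparison is now against an unsigned size_t, a negative map->type also lands in the "??" branch. A self-contained illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MT_FB, MT_REG, MT_SHM, MT_AGP, MT_SG, MT_PCI };

/* designated initializers stay correct even if the enum is reordered */
static const char * const names[] = {
        [MT_FB]  = "FB",  [MT_REG] = "REG", [MT_SHM] = "SHM",
        [MT_AGP] = "AGP", [MT_SG]  = "SG",  [MT_PCI] = "PCI",
};

static const char *name_of(unsigned int t)
{
        return t >= ARRAY_SIZE(names) ? "??" : names[t];
}

int main(void)
{
        printf("%s %s\n", name_of(MT_AGP), name_of(42));  /* AGP ?? */
        return 0;
}
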
40563diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40564index 2f4c4343..dd12cd2 100644
40565--- a/drivers/gpu/drm/drm_ioc32.c
40566+++ b/drivers/gpu/drm/drm_ioc32.c
40567@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40568 request = compat_alloc_user_space(nbytes);
40569 if (!access_ok(VERIFY_WRITE, request, nbytes))
40570 return -EFAULT;
40571- list = (struct drm_buf_desc *) (request + 1);
40572+ list = (struct drm_buf_desc __user *) (request + 1);
40573
40574 if (__put_user(count, &request->count)
40575 || __put_user(list, &request->list))
40576@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40577 request = compat_alloc_user_space(nbytes);
40578 if (!access_ok(VERIFY_WRITE, request, nbytes))
40579 return -EFAULT;
40580- list = (struct drm_buf_pub *) (request + 1);
40581+ list = (struct drm_buf_pub __user *) (request + 1);
40582
40583 if (__put_user(count, &request->count)
40584 || __put_user(list, &request->list))
40585@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40586 return 0;
40587 }
40588
40589-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40590+drm_ioctl_compat_t drm_compat_ioctls[] = {
40591 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40592 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40593 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40594@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40595 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40596 {
40597 unsigned int nr = DRM_IOCTL_NR(cmd);
40598- drm_ioctl_compat_t *fn;
40599 int ret;
40600
40601 /* Assume that ioctls without an explicit compat routine will just
40602@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40603 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40604 return drm_ioctl(filp, cmd, arg);
40605
40606- fn = drm_compat_ioctls[nr];
40607-
40608- if (fn != NULL)
40609- ret = (*fn) (filp, cmd, arg);
40610+ if (drm_compat_ioctls[nr] != NULL)
40611+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40612 else
40613 ret = drm_ioctl(filp, cmd, arg);
40614
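
Dropping the `*` from drm_ioctl_compat_t *drm_compat_ioctls[] only compiles if the typedef has itself become a pointer type; presumably that flip happens elsewhere in this patch, and it is what lets these tables (here and in the i915/mga/r128/radeon ioc32 files below) be treated as constant data. A sketch of the convention, with a hypothetical handler:

#include <linux/fs.h>

/* assumption: the typedef is converted from a function type to a
 * function-pointer type in a DRM header hunk elsewhere in this patch */
typedef int (*drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
                                  unsigned long arg);

static int my_compat_version(struct file *f, unsigned int c, unsigned long a)
{
        return 0;       /* hypothetical handler body */
}

static drm_ioctl_compat_t compat_table[] = {
        my_compat_version,
};
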
40615diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
40616index 98a33c580..8fd1c2b 100644
40617--- a/drivers/gpu/drm/drm_stub.c
40618+++ b/drivers/gpu/drm/drm_stub.c
40619@@ -409,7 +409,7 @@ void drm_unplug_dev(struct drm_device *dev)
40620
40621 drm_device_set_unplugged(dev);
40622
40623- if (dev->open_count == 0) {
40624+ if (local_read(&dev->open_count) == 0) {
40625 drm_put_dev(dev);
40626 }
40627 mutex_unlock(&drm_global_mutex);
40628diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
40629index c22c309..ae758c3 100644
40630--- a/drivers/gpu/drm/drm_sysfs.c
40631+++ b/drivers/gpu/drm/drm_sysfs.c
40632@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
40633 */
40634 int drm_sysfs_device_add(struct drm_minor *minor)
40635 {
40636- char *minor_str;
40637+ const char *minor_str;
40638 int r;
40639
40640 if (minor->type == DRM_MINOR_CONTROL)
40641diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40642index d4d16ed..8fb0b51 100644
40643--- a/drivers/gpu/drm/i810/i810_drv.h
40644+++ b/drivers/gpu/drm/i810/i810_drv.h
40645@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
40646 int page_flipping;
40647
40648 wait_queue_head_t irq_queue;
40649- atomic_t irq_received;
40650- atomic_t irq_emitted;
40651+ atomic_unchecked_t irq_received;
40652+ atomic_unchecked_t irq_emitted;
40653
40654 int front_offset;
40655 } drm_i810_private_t;
40656diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
40657index b2b46c5..feb9fe7 100644
40658--- a/drivers/gpu/drm/i915/i915_debugfs.c
40659+++ b/drivers/gpu/drm/i915/i915_debugfs.c
40660@@ -713,7 +713,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
40661 I915_READ(GTIMR));
40662 }
40663 seq_printf(m, "Interrupts received: %d\n",
40664- atomic_read(&dev_priv->irq_received));
40665+ atomic_read_unchecked(&dev_priv->irq_received));
40666 for_each_ring(ring, dev_priv, i) {
40667 if (INTEL_INFO(dev)->gen >= 6) {
40668 seq_printf(m,
40669diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40670index 15a74f9..4278889 100644
40671--- a/drivers/gpu/drm/i915/i915_dma.c
40672+++ b/drivers/gpu/drm/i915/i915_dma.c
40673@@ -1273,7 +1273,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40674 bool can_switch;
40675
40676 spin_lock(&dev->count_lock);
40677- can_switch = (dev->open_count == 0);
40678+ can_switch = (local_read(&dev->open_count) == 0);
40679 spin_unlock(&dev->count_lock);
40680 return can_switch;
40681 }
40682diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
40683index 697f215..6f89b7f 100644
40684--- a/drivers/gpu/drm/i915/i915_drv.h
40685+++ b/drivers/gpu/drm/i915/i915_drv.h
40686@@ -1362,7 +1362,7 @@ typedef struct drm_i915_private {
40687 drm_dma_handle_t *status_page_dmah;
40688 struct resource mch_res;
40689
40690- atomic_t irq_received;
40691+ atomic_unchecked_t irq_received;
40692
40693 /* protects the irq masks */
40694 spinlock_t irq_lock;
40695diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40696index d269ecf..6d857bc 100644
40697--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40698+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40699@@ -860,9 +860,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40700
40701 static int
40702 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
40703- int count)
40704+ unsigned int count)
40705 {
40706- int i;
40707+ unsigned int i;
40708 unsigned relocs_total = 0;
40709 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40710
40711diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40712index 3c59584..500f2e9 100644
40713--- a/drivers/gpu/drm/i915/i915_ioc32.c
40714+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40715@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40716 (unsigned long)request);
40717 }
40718
40719-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40720+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40721 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40722 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40723 [DRM_I915_GETPARAM] = compat_i915_getparam,
40724@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40725 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40726 {
40727 unsigned int nr = DRM_IOCTL_NR(cmd);
40728- drm_ioctl_compat_t *fn = NULL;
40729 int ret;
40730
40731 if (nr < DRM_COMMAND_BASE)
40732 return drm_compat_ioctl(filp, cmd, arg);
40733
40734- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
40735- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40736-
40737- if (fn != NULL)
40738+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
40739+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40740 ret = (*fn) (filp, cmd, arg);
40741- else
40742+ } else
40743 ret = drm_ioctl(filp, cmd, arg);
40744
40745 return ret;
40746diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
40747index 4050450..f67c5c1 100644
40748--- a/drivers/gpu/drm/i915/i915_irq.c
40749+++ b/drivers/gpu/drm/i915/i915_irq.c
40750@@ -1448,7 +1448,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
40751 int pipe;
40752 u32 pipe_stats[I915_MAX_PIPES];
40753
40754- atomic_inc(&dev_priv->irq_received);
40755+ atomic_inc_unchecked(&dev_priv->irq_received);
40756
40757 while (true) {
40758 iir = I915_READ(VLV_IIR);
40759@@ -1761,7 +1761,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
40760 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
40761 irqreturn_t ret = IRQ_NONE;
40762
40763- atomic_inc(&dev_priv->irq_received);
40764+ atomic_inc_unchecked(&dev_priv->irq_received);
40765
40766 /* We get interrupts on unclaimed registers, so check for this before we
40767 * do any I915_{READ,WRITE}. */
40768@@ -1831,7 +1831,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
40769 uint32_t tmp = 0;
40770 enum pipe pipe;
40771
40772- atomic_inc(&dev_priv->irq_received);
40773+ atomic_inc_unchecked(&dev_priv->irq_received);
40774
40775 master_ctl = I915_READ(GEN8_MASTER_IRQ);
40776 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
40777@@ -2655,7 +2655,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
40778 {
40779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40780
40781- atomic_set(&dev_priv->irq_received, 0);
40782+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40783
40784 I915_WRITE(HWSTAM, 0xeffe);
40785
40786@@ -2673,7 +2673,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
40787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40788 int pipe;
40789
40790- atomic_set(&dev_priv->irq_received, 0);
40791+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40792
40793 /* VLV magic */
40794 I915_WRITE(VLV_IMR, 0);
40795@@ -2704,7 +2704,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
40796 struct drm_i915_private *dev_priv = dev->dev_private;
40797 int pipe;
40798
40799- atomic_set(&dev_priv->irq_received, 0);
40800+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40801
40802 I915_WRITE(GEN8_MASTER_IRQ, 0);
40803 POSTING_READ(GEN8_MASTER_IRQ);
40804@@ -3028,7 +3028,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
40805 if (!dev_priv)
40806 return;
40807
40808- atomic_set(&dev_priv->irq_received, 0);
40809+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40810
40811 I915_WRITE(GEN8_MASTER_IRQ, 0);
40812
40813@@ -3122,7 +3122,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
40814 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40815 int pipe;
40816
40817- atomic_set(&dev_priv->irq_received, 0);
40818+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40819
40820 for_each_pipe(pipe)
40821 I915_WRITE(PIPESTAT(pipe), 0);
40822@@ -3208,7 +3208,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
40823 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40824 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40825
40826- atomic_inc(&dev_priv->irq_received);
40827+ atomic_inc_unchecked(&dev_priv->irq_received);
40828
40829 iir = I915_READ16(IIR);
40830 if (iir == 0)
40831@@ -3287,7 +3287,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
40832 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40833 int pipe;
40834
40835- atomic_set(&dev_priv->irq_received, 0);
40836+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40837
40838 if (I915_HAS_HOTPLUG(dev)) {
40839 I915_WRITE(PORT_HOTPLUG_EN, 0);
40840@@ -3394,7 +3394,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
40841 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40842 int pipe, ret = IRQ_NONE;
40843
40844- atomic_inc(&dev_priv->irq_received);
40845+ atomic_inc_unchecked(&dev_priv->irq_received);
40846
40847 iir = I915_READ(IIR);
40848 do {
40849@@ -3521,7 +3521,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
40850 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40851 int pipe;
40852
40853- atomic_set(&dev_priv->irq_received, 0);
40854+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40855
40856 I915_WRITE(PORT_HOTPLUG_EN, 0);
40857 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
40858@@ -3637,7 +3637,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
40859 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40860 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40861
40862- atomic_inc(&dev_priv->irq_received);
40863+ atomic_inc_unchecked(&dev_priv->irq_received);
40864
40865 iir = I915_READ(IIR);
40866
40867diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40868index 9d4d837..6836e22 100644
40869--- a/drivers/gpu/drm/i915/intel_display.c
40870+++ b/drivers/gpu/drm/i915/intel_display.c
40871@@ -10798,13 +10798,13 @@ struct intel_quirk {
40872 int subsystem_vendor;
40873 int subsystem_device;
40874 void (*hook)(struct drm_device *dev);
40875-};
40876+} __do_const;
40877
40878 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40879 struct intel_dmi_quirk {
40880 void (*hook)(struct drm_device *dev);
40881 const struct dmi_system_id (*dmi_id_list)[];
40882-};
40883+} __do_const;
40884
40885 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40886 {
40887@@ -10812,18 +10812,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40888 return 1;
40889 }
40890
40891-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40892+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40893 {
40894- .dmi_id_list = &(const struct dmi_system_id[]) {
40895- {
40896- .callback = intel_dmi_reverse_brightness,
40897- .ident = "NCR Corporation",
40898- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40899- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40900- },
40901- },
40902- { } /* terminating entry */
40903+ .callback = intel_dmi_reverse_brightness,
40904+ .ident = "NCR Corporation",
40905+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40906+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40907 },
40908+ },
40909+ { } /* terminating entry */
40910+};
40911+
40912+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40913+ {
40914+ .dmi_id_list = &intel_dmi_quirks_table,
40915 .hook = quirk_invert_brightness,
40916 },
40917 };
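
The intel_display.c rework hoists the anonymous compound literal out of intel_dmi_quirks[] into a named static const dmi_system_id table, apparently so the constify plugin can place the whole quirk description in read-only memory now that both quirk structs carry __do_const. Reconstructed in miniature with hypothetical quirk types:

#include <linux/dmi.h>

struct my_quirk {                               /* hypothetical */
        const struct dmi_system_id (*dmi_id_list)[];
        void (*hook)(void);
};

static int my_callback(const struct dmi_system_id *id) { return 1; }
static void my_hook(void) { }

static const struct dmi_system_id quirks_table[] = {
        {
                .callback = my_callback,
                .ident    = "Example Vendor",
                .matches  = { DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor") },
        },
        { }     /* terminator */
};

static const struct my_quirk quirks[] = {
        { .dmi_id_list = &quirks_table, .hook = my_hook },
};
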
40918diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40919index fe45321..836fdca 100644
40920--- a/drivers/gpu/drm/mga/mga_drv.h
40921+++ b/drivers/gpu/drm/mga/mga_drv.h
40922@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
40923 u32 clear_cmd;
40924 u32 maccess;
40925
40926- atomic_t vbl_received; /**< Number of vblanks received. */
40927+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40928 wait_queue_head_t fence_queue;
40929- atomic_t last_fence_retired;
40930+ atomic_unchecked_t last_fence_retired;
40931 u32 next_fence_to_post;
40932
40933 unsigned int fb_cpp;
40934diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40935index 86b4bb8..ae237ad 100644
40936--- a/drivers/gpu/drm/mga/mga_ioc32.c
40937+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40938@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40939 return 0;
40940 }
40941
40942-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40943+drm_ioctl_compat_t mga_compat_ioctls[] = {
40944 [DRM_MGA_INIT] = compat_mga_init,
40945 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40946 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40947@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40948 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40949 {
40950 unsigned int nr = DRM_IOCTL_NR(cmd);
40951- drm_ioctl_compat_t *fn = NULL;
40952 int ret;
40953
40954 if (nr < DRM_COMMAND_BASE)
40955 return drm_compat_ioctl(filp, cmd, arg);
40956
40957- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
40958- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40959-
40960- if (fn != NULL)
40961+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
40962+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40963 ret = (*fn) (filp, cmd, arg);
40964- else
40965+ } else
40966 ret = drm_ioctl(filp, cmd, arg);
40967
40968 return ret;
40969diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40970index 1b071b8..de8601a 100644
40971--- a/drivers/gpu/drm/mga/mga_irq.c
40972+++ b/drivers/gpu/drm/mga/mga_irq.c
40973@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40974 if (crtc != 0)
40975 return 0;
40976
40977- return atomic_read(&dev_priv->vbl_received);
40978+ return atomic_read_unchecked(&dev_priv->vbl_received);
40979 }
40980
40981
40982@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40983 /* VBLANK interrupt */
40984 if (status & MGA_VLINEPEN) {
40985 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40986- atomic_inc(&dev_priv->vbl_received);
40987+ atomic_inc_unchecked(&dev_priv->vbl_received);
40988 drm_handle_vblank(dev, 0);
40989 handled = 1;
40990 }
40991@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40992 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40993 MGA_WRITE(MGA_PRIMEND, prim_end);
40994
40995- atomic_inc(&dev_priv->last_fence_retired);
40996+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40997 wake_up(&dev_priv->fence_queue);
40998 handled = 1;
40999 }
41000@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41001 * using fences.
41002 */
41003 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41004- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41005+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41006 - *sequence) <= (1 << 23)));
41007
41008 *sequence = cur_fence;
41009diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41010index 4c3feaa..26391ce 100644
41011--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41012+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41013@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41014 struct bit_table {
41015 const char id;
41016 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41017-};
41018+} __no_const;
41019
41020 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41021
41022diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41023index 23ca7a5..b6c955d 100644
41024--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41025+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41026@@ -97,7 +97,6 @@ struct nouveau_drm {
41027 struct drm_global_reference mem_global_ref;
41028 struct ttm_bo_global_ref bo_global_ref;
41029 struct ttm_bo_device bdev;
41030- atomic_t validate_sequence;
41031 int (*move)(struct nouveau_channel *,
41032 struct ttm_buffer_object *,
41033 struct ttm_mem_reg *, struct ttm_mem_reg *);
41034diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41035index c1a7e5a..38b8539 100644
41036--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41037+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41038@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41039 unsigned long arg)
41040 {
41041 unsigned int nr = DRM_IOCTL_NR(cmd);
41042- drm_ioctl_compat_t *fn = NULL;
41043+ drm_ioctl_compat_t fn = NULL;
41044 int ret;
41045
41046 if (nr < DRM_COMMAND_BASE)
41047diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41048index d45d50d..72a5dd2 100644
41049--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41050+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41051@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41052 }
41053
41054 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41055- nouveau_vram_manager_init,
41056- nouveau_vram_manager_fini,
41057- nouveau_vram_manager_new,
41058- nouveau_vram_manager_del,
41059- nouveau_vram_manager_debug
41060+ .init = nouveau_vram_manager_init,
41061+ .takedown = nouveau_vram_manager_fini,
41062+ .get_node = nouveau_vram_manager_new,
41063+ .put_node = nouveau_vram_manager_del,
41064+ .debug = nouveau_vram_manager_debug
41065 };
41066
41067 static int
41068@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41069 }
41070
41071 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41072- nouveau_gart_manager_init,
41073- nouveau_gart_manager_fini,
41074- nouveau_gart_manager_new,
41075- nouveau_gart_manager_del,
41076- nouveau_gart_manager_debug
41077+ .init = nouveau_gart_manager_init,
41078+ .takedown = nouveau_gart_manager_fini,
41079+ .get_node = nouveau_gart_manager_new,
41080+ .put_node = nouveau_gart_manager_del,
41081+ .debug = nouveau_gart_manager_debug
41082 };
41083
41084 #include <core/subdev/vm/nv04.h>
41085@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41086 }
41087
41088 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41089- nv04_gart_manager_init,
41090- nv04_gart_manager_fini,
41091- nv04_gart_manager_new,
41092- nv04_gart_manager_del,
41093- nv04_gart_manager_debug
41094+ .init = nv04_gart_manager_init,
41095+ .takedown = nv04_gart_manager_fini,
41096+ .get_node = nv04_gart_manager_new,
41097+ .put_node = nv04_gart_manager_del,
41098+ .debug = nv04_gart_manager_debug
41099 };
41100
41101 int
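
All three nouveau manager tables (and ttm_bo_manager_func further down) switch from positional to designated initializers: if ttm_mem_type_manager_func ever gains or reorders a member, a positional list silently binds callbacks to the wrong slots, while .init = ... keeps each pointer attached to its field by name. The difference in miniature:

struct ops {
        int  (*init)(void);
        void (*takedown)(void);
};

static int  my_init(void)     { return 0; }
static void my_takedown(void) { }

/* order-independent: still correct if `struct ops` is rearranged */
static const struct ops o = {
        .takedown = my_takedown,
        .init     = my_init,
};
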
41102diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41103index 471347e..5adc6b9 100644
41104--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41105+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41106@@ -67,7 +67,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41107 bool can_switch;
41108
41109 spin_lock(&dev->count_lock);
41110- can_switch = (dev->open_count == 0);
41111+ can_switch = (local_read(&dev->open_count) == 0);
41112 spin_unlock(&dev->count_lock);
41113 return can_switch;
41114 }
41115diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41116index eb89653..613cf71 100644
41117--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41118+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41119@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41120 int ret;
41121
41122 mutex_lock(&qdev->async_io_mutex);
41123- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41124+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41125 if (qdev->last_sent_io_cmd > irq_num) {
41126 if (intr)
41127 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41128- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41129+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41130 else
41131 ret = wait_event_timeout(qdev->io_cmd_event,
41132- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41133+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41134 /* 0 is timeout, just bail the "hw" has gone away */
41135 if (ret <= 0)
41136 goto out;
41137- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41138+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41139 }
41140 outb(val, addr);
41141 qdev->last_sent_io_cmd = irq_num + 1;
41142 if (intr)
41143 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41144- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41145+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41146 else
41147 ret = wait_event_timeout(qdev->io_cmd_event,
41148- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41149+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41150 out:
41151 if (ret > 0)
41152 ret = 0;
41153diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41154index c3c2bbd..bc3c0fb 100644
41155--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41156+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41157@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41158 struct drm_info_node *node = (struct drm_info_node *) m->private;
41159 struct qxl_device *qdev = node->minor->dev->dev_private;
41160
41161- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41162- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41163- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41164- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41165+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41166+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41167+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41168+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41169 seq_printf(m, "%d\n", qdev->irq_received_error);
41170 return 0;
41171 }
41172diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41173index 36ed40b..0397633 100644
41174--- a/drivers/gpu/drm/qxl/qxl_drv.h
41175+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41176@@ -290,10 +290,10 @@ struct qxl_device {
41177 unsigned int last_sent_io_cmd;
41178
41179 /* interrupt handling */
41180- atomic_t irq_received;
41181- atomic_t irq_received_display;
41182- atomic_t irq_received_cursor;
41183- atomic_t irq_received_io_cmd;
41184+ atomic_unchecked_t irq_received;
41185+ atomic_unchecked_t irq_received_display;
41186+ atomic_unchecked_t irq_received_cursor;
41187+ atomic_unchecked_t irq_received_io_cmd;
41188 unsigned irq_received_error;
41189 wait_queue_head_t display_event;
41190 wait_queue_head_t cursor_event;
41191diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41192index 0bb86e6..d41416d 100644
41193--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41194+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41195@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41196
41197 /* TODO copy slow path code from i915 */
41198 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41199- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41200+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41201
41202 {
41203 struct qxl_drawable *draw = fb_cmd;
41204@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41205 struct drm_qxl_reloc reloc;
41206
41207 if (copy_from_user(&reloc,
41208- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41209+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41210 sizeof(reloc))) {
41211 ret = -EFAULT;
41212 goto out_free_bos;
41213@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41214
41215 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41216
41217- struct drm_qxl_command *commands =
41218- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41219+ struct drm_qxl_command __user *commands =
41220+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41221
41222- if (copy_from_user(&user_cmd, &commands[cmd_num],
41223+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41224 sizeof(user_cmd)))
41225 return -EFAULT;
41226
41227diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41228index 28f84b4..fb3e224 100644
41229--- a/drivers/gpu/drm/qxl/qxl_irq.c
41230+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41231@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41232
41233 pending = xchg(&qdev->ram_header->int_pending, 0);
41234
41235- atomic_inc(&qdev->irq_received);
41236+ atomic_inc_unchecked(&qdev->irq_received);
41237
41238 if (pending & QXL_INTERRUPT_DISPLAY) {
41239- atomic_inc(&qdev->irq_received_display);
41240+ atomic_inc_unchecked(&qdev->irq_received_display);
41241 wake_up_all(&qdev->display_event);
41242 qxl_queue_garbage_collect(qdev, false);
41243 }
41244 if (pending & QXL_INTERRUPT_CURSOR) {
41245- atomic_inc(&qdev->irq_received_cursor);
41246+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41247 wake_up_all(&qdev->cursor_event);
41248 }
41249 if (pending & QXL_INTERRUPT_IO_CMD) {
41250- atomic_inc(&qdev->irq_received_io_cmd);
41251+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41252 wake_up_all(&qdev->io_cmd_event);
41253 }
41254 if (pending & QXL_INTERRUPT_ERROR) {
41255@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41256 init_waitqueue_head(&qdev->io_cmd_event);
41257 INIT_WORK(&qdev->client_monitors_config_work,
41258 qxl_client_monitors_config_work_func);
41259- atomic_set(&qdev->irq_received, 0);
41260- atomic_set(&qdev->irq_received_display, 0);
41261- atomic_set(&qdev->irq_received_cursor, 0);
41262- atomic_set(&qdev->irq_received_io_cmd, 0);
41263+ atomic_set_unchecked(&qdev->irq_received, 0);
41264+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41265+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41266+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41267 qdev->irq_received_error = 0;
41268 ret = drm_irq_install(qdev->ddev);
41269 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41270diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41271index c82c1d6a9..6158c02 100644
41272--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41273+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41274@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41275 }
41276 }
41277
41278-static struct vm_operations_struct qxl_ttm_vm_ops;
41279+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41280 static const struct vm_operations_struct *ttm_vm_ops;
41281
41282 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41283@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41284 return r;
41285 if (unlikely(ttm_vm_ops == NULL)) {
41286 ttm_vm_ops = vma->vm_ops;
41287+ pax_open_kernel();
41288 qxl_ttm_vm_ops = *ttm_vm_ops;
41289 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41290+ pax_close_kernel();
41291 }
41292 vma->vm_ops = &qxl_ttm_vm_ops;
41293 return 0;
41294@@ -561,25 +563,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41295 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41296 {
41297 #if defined(CONFIG_DEBUG_FS)
41298- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41299- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41300- unsigned i;
41301+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41302+ {
41303+ .name = "qxl_mem_mm",
41304+ .show = &qxl_mm_dump_table,
41305+ },
41306+ {
41307+ .name = "qxl_surf_mm",
41308+ .show = &qxl_mm_dump_table,
41309+ }
41310+ };
41311
41312- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41313- if (i == 0)
41314- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41315- else
41316- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41317- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41318- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41319- qxl_mem_types_list[i].driver_features = 0;
41320- if (i == 0)
41321- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41322- else
41323- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41324+ pax_open_kernel();
41325+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41326+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41327+ pax_close_kernel();
41328
41329- }
41330- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41331+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41332 #else
41333 return 0;
41334 #endif
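
qxl_mmap() (and radeon_mmap() below) clone TTM's vm_operations_struct once on first mmap and override .fault; with the clone now typed via the no_const alias and marked __read_only, that one-time write happens inside a pax_open_kernel() window. The clone-and-hook pattern, with hypothetical names:

#include <linux/mm.h>

static vm_operations_struct_no_const my_vm_ops __read_only;
static const struct vm_operations_struct *saved_ttm_vm_ops;

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;         /* hypothetical stand-in */
}

static void hook_vm_ops(struct vm_area_struct *vma)
{
        if (unlikely(saved_ttm_vm_ops == NULL)) {
                saved_ttm_vm_ops = vma->vm_ops;  /* remember TTM's table */
                pax_open_kernel();
                my_vm_ops = *saved_ttm_vm_ops;   /* copy it once */
                my_vm_ops.fault = &my_fault;     /* then hook .fault */
                pax_close_kernel();
        }
        vma->vm_ops = &my_vm_ops;
}
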
41335diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41336index 59459fe..be26b31 100644
41337--- a/drivers/gpu/drm/r128/r128_cce.c
41338+++ b/drivers/gpu/drm/r128/r128_cce.c
41339@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41340
41341 /* GH: Simple idle check.
41342 */
41343- atomic_set(&dev_priv->idle_count, 0);
41344+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41345
41346 /* We don't support anything other than bus-mastering ring mode,
41347 * but the ring can be in either AGP or PCI space for the ring
41348diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41349index 5bf3f5f..7000661 100644
41350--- a/drivers/gpu/drm/r128/r128_drv.h
41351+++ b/drivers/gpu/drm/r128/r128_drv.h
41352@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
41353 int is_pci;
41354 unsigned long cce_buffers_offset;
41355
41356- atomic_t idle_count;
41357+ atomic_unchecked_t idle_count;
41358
41359 int page_flipping;
41360 int current_page;
41361 u32 crtc_offset;
41362 u32 crtc_offset_cntl;
41363
41364- atomic_t vbl_received;
41365+ atomic_unchecked_t vbl_received;
41366
41367 u32 color_fmt;
41368 unsigned int front_offset;
41369diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41370index b0d0fd3..a6fbbe4 100644
41371--- a/drivers/gpu/drm/r128/r128_ioc32.c
41372+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41373@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41374 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41375 }
41376
41377-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41378+drm_ioctl_compat_t r128_compat_ioctls[] = {
41379 [DRM_R128_INIT] = compat_r128_init,
41380 [DRM_R128_DEPTH] = compat_r128_depth,
41381 [DRM_R128_STIPPLE] = compat_r128_stipple,
41382@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41383 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41384 {
41385 unsigned int nr = DRM_IOCTL_NR(cmd);
41386- drm_ioctl_compat_t *fn = NULL;
41387 int ret;
41388
41389 if (nr < DRM_COMMAND_BASE)
41390 return drm_compat_ioctl(filp, cmd, arg);
41391
41392- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
41393- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41394-
41395- if (fn != NULL)
41396+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
41397+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41398 ret = (*fn) (filp, cmd, arg);
41399- else
41400+ } else
41401 ret = drm_ioctl(filp, cmd, arg);
41402
41403 return ret;
41404diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41405index c2ae496..30b5993 100644
41406--- a/drivers/gpu/drm/r128/r128_irq.c
41407+++ b/drivers/gpu/drm/r128/r128_irq.c
41408@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41409 if (crtc != 0)
41410 return 0;
41411
41412- return atomic_read(&dev_priv->vbl_received);
41413+ return atomic_read_unchecked(&dev_priv->vbl_received);
41414 }
41415
41416 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41417@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41418 /* VBLANK interrupt */
41419 if (status & R128_CRTC_VBLANK_INT) {
41420 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41421- atomic_inc(&dev_priv->vbl_received);
41422+ atomic_inc_unchecked(&dev_priv->vbl_received);
41423 drm_handle_vblank(dev, 0);
41424 return IRQ_HANDLED;
41425 }
41426diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41427index e806dac..f81d32f 100644
41428--- a/drivers/gpu/drm/r128/r128_state.c
41429+++ b/drivers/gpu/drm/r128/r128_state.c
41430@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41431
41432 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41433 {
41434- if (atomic_read(&dev_priv->idle_count) == 0)
41435+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41436 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41437 else
41438- atomic_set(&dev_priv->idle_count, 0);
41439+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41440 }
41441
41442 #endif
41443diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41444index 4a85bb6..aaea819 100644
41445--- a/drivers/gpu/drm/radeon/mkregtable.c
41446+++ b/drivers/gpu/drm/radeon/mkregtable.c
41447@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41448 regex_t mask_rex;
41449 regmatch_t match[4];
41450 char buf[1024];
41451- size_t end;
41452+ long end;
41453 int len;
41454 int done = 0;
41455 int r;
41456 unsigned o;
41457 struct offset *offset;
41458 char last_reg_s[10];
41459- int last_reg;
41460+ unsigned long last_reg;
41461
41462 if (regcomp
41463 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41464diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41465index 7f370b3..4e92ca6 100644
41466--- a/drivers/gpu/drm/radeon/radeon_device.c
41467+++ b/drivers/gpu/drm/radeon/radeon_device.c
41468@@ -1128,7 +1128,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41469 bool can_switch;
41470
41471 spin_lock(&dev->count_lock);
41472- can_switch = (dev->open_count == 0);
41473+ can_switch = (local_read(&dev->open_count) == 0);
41474 spin_unlock(&dev->count_lock);
41475 return can_switch;
41476 }
41477diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41478index dafd812..1bf20c7 100644
41479--- a/drivers/gpu/drm/radeon/radeon_drv.h
41480+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41481@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
41482
41483 /* SW interrupt */
41484 wait_queue_head_t swi_queue;
41485- atomic_t swi_emitted;
41486+ atomic_unchecked_t swi_emitted;
41487 int vblank_crtc;
41488 uint32_t irq_enable_reg;
41489 uint32_t r500_disp_irq_reg;
41490diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41491index bdb0f93..5ff558f 100644
41492--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41493+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41494@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41495 request = compat_alloc_user_space(sizeof(*request));
41496 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41497 || __put_user(req32.param, &request->param)
41498- || __put_user((void __user *)(unsigned long)req32.value,
41499+ || __put_user((unsigned long)req32.value,
41500 &request->value))
41501 return -EFAULT;
41502
41503@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41504 #define compat_radeon_cp_setparam NULL
41505 #endif /* X86_64 || IA64 */
41506
41507-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41508+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41509 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41510 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41511 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41512@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41513 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41514 {
41515 unsigned int nr = DRM_IOCTL_NR(cmd);
41516- drm_ioctl_compat_t *fn = NULL;
41517 int ret;
41518
41519 if (nr < DRM_COMMAND_BASE)
41520 return drm_compat_ioctl(filp, cmd, arg);
41521
41522- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
41523- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41524-
41525- if (fn != NULL)
41526+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
41527+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41528 ret = (*fn) (filp, cmd, arg);
41529- else
41530+ } else
41531 ret = drm_ioctl(filp, cmd, arg);
41532
41533 return ret;
41534diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41535index 244b19b..c19226d 100644
41536--- a/drivers/gpu/drm/radeon/radeon_irq.c
41537+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41538@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41539 unsigned int ret;
41540 RING_LOCALS;
41541
41542- atomic_inc(&dev_priv->swi_emitted);
41543- ret = atomic_read(&dev_priv->swi_emitted);
41544+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41545+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41546
41547 BEGIN_RING(4);
41548 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41549@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41550 drm_radeon_private_t *dev_priv =
41551 (drm_radeon_private_t *) dev->dev_private;
41552
41553- atomic_set(&dev_priv->swi_emitted, 0);
41554+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41555 init_waitqueue_head(&dev_priv->swi_queue);
41556
41557 dev->max_vblank_count = 0x001fffff;
41558diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41559index 956ab7f..fbd36d8 100644
41560--- a/drivers/gpu/drm/radeon/radeon_state.c
41561+++ b/drivers/gpu/drm/radeon/radeon_state.c
41562@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41563 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41564 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41565
41566- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41567+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41568 sarea_priv->nbox * sizeof(depth_boxes[0])))
41569 return -EFAULT;
41570
41571@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41572 {
41573 drm_radeon_private_t *dev_priv = dev->dev_private;
41574 drm_radeon_getparam_t *param = data;
41575- int value;
41576+ int value = 0;
41577
41578 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41579
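
Two user-reachable fixes in radeon_state.c: the clear ioctl re-validates sarea_priv->nbox in the same expression as the copy_from_user(), hardening against the SAREA-resident value growing between the earlier clamp and its use as a copy size, and getparam's value is zero-initialized so a branch that never writes it cannot copy an uninitialized stack word out to userspace. The infoleak class, in miniature:

#include <linux/uaccess.h>

static int my_getparam(unsigned int param, int __user *out)
{
        int value = 0;          /* without this, an unwritten branch
                                   would leak whatever the stack held */

        switch (param) {
        case 1:
                value = 42;
                break;
        }
        return copy_to_user(out, &value, sizeof(value)) ? -EFAULT : 0;
}
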
41580diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41581index 040a2a1..eae4e54 100644
41582--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41583+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41584@@ -790,7 +790,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41585 man->size = size >> PAGE_SHIFT;
41586 }
41587
41588-static struct vm_operations_struct radeon_ttm_vm_ops;
41589+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41590 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41591
41592 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41593@@ -831,8 +831,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41594 }
41595 if (unlikely(ttm_vm_ops == NULL)) {
41596 ttm_vm_ops = vma->vm_ops;
41597+ pax_open_kernel();
41598 radeon_ttm_vm_ops = *ttm_vm_ops;
41599 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41600+ pax_close_kernel();
41601 }
41602 vma->vm_ops = &radeon_ttm_vm_ops;
41603 return 0;
41604diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41605index 9336006..ce78aa7 100644
41606--- a/drivers/gpu/drm/tegra/dc.c
41607+++ b/drivers/gpu/drm/tegra/dc.c
41608@@ -1057,7 +1057,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41609 }
41610
41611 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41612- dc->debugfs_files[i].data = dc;
41613+ *(void **)&dc->debugfs_files[i].data = dc;
41614
41615 err = drm_debugfs_create_files(dc->debugfs_files,
41616 ARRAY_SIZE(debugfs_files),
41617diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41618index d452faab..f8cbc6a 100644
41619--- a/drivers/gpu/drm/tegra/dsi.c
41620+++ b/drivers/gpu/drm/tegra/dsi.c
41621@@ -53,7 +53,7 @@ struct tegra_dsi {
41622 struct clk *clk_lp;
41623 struct clk *clk;
41624
41625- struct drm_info_list *debugfs_files;
41626+ drm_info_list_no_const *debugfs_files;
41627 struct drm_minor *minor;
41628 struct dentry *debugfs;
41629
41630diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41631index 6928015..c9853e7 100644
41632--- a/drivers/gpu/drm/tegra/hdmi.c
41633+++ b/drivers/gpu/drm/tegra/hdmi.c
41634@@ -59,7 +59,7 @@ struct tegra_hdmi {
41635 bool stereo;
41636 bool dvi;
41637
41638- struct drm_info_list *debugfs_files;
41639+ drm_info_list_no_const *debugfs_files;
41640 struct drm_minor *minor;
41641 struct dentry *debugfs;
41642 };
41643diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41644index c58eba33..83c2728 100644
41645--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41646+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41647@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41648 }
41649
41650 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41651- ttm_bo_man_init,
41652- ttm_bo_man_takedown,
41653- ttm_bo_man_get_node,
41654- ttm_bo_man_put_node,
41655- ttm_bo_man_debug
41656+ .init = ttm_bo_man_init,
41657+ .takedown = ttm_bo_man_takedown,
41658+ .get_node = ttm_bo_man_get_node,
41659+ .put_node = ttm_bo_man_put_node,
41660+ .debug = ttm_bo_man_debug
41661 };
41662 EXPORT_SYMBOL(ttm_bo_manager_func);
41663diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41664index dbc2def..0a9f710 100644
41665--- a/drivers/gpu/drm/ttm/ttm_memory.c
41666+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41667@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41668 zone->glob = glob;
41669 glob->zone_kernel = zone;
41670 ret = kobject_init_and_add(
41671- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41672+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41673 if (unlikely(ret != 0)) {
41674 kobject_put(&zone->kobj);
41675 return ret;
41676@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41677 zone->glob = glob;
41678 glob->zone_dma32 = zone;
41679 ret = kobject_init_and_add(
41680- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41681+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41682 if (unlikely(ret != 0)) {
41683 kobject_put(&zone->kobj);
41684 return ret;
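
kobject_init_and_add() takes a printf-style format for the kobject name, so passing zone->name directly lets any '%' in the name be parsed as a conversion specifier; routing it through a literal "%s" makes the name pure data. The before/after, reduced:

/* unsafe: the name doubles as the format string */
ret = kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
			   &glob->kobj, zone->name);

/* safe: the format is constant, the name is only an argument */
ret = kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
			   &glob->kobj, "%s", zone->name);
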
41685diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41686index 863bef9..cba15cf 100644
41687--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41688+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41689@@ -391,9 +391,9 @@ out:
41690 static unsigned long
41691 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41692 {
41693- static atomic_t start_pool = ATOMIC_INIT(0);
41694+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
41695 unsigned i;
41696- unsigned pool_offset = atomic_add_return(1, &start_pool);
41697+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
41698 struct ttm_page_pool *pool;
41699 int shrink_pages = sc->nr_to_scan;
41700 unsigned long freed = 0;
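
PaX's REFCOUNT hardening makes plain atomic_t operations trap on overflow to defeat reference-count-overflow exploits. Counters that are supposed to wrap, like this round-robin pool cursor, are therefore moved to atomic_unchecked_t and the *_unchecked helpers, which behave like the stock atomics minus the overflow trap; the same conversion recurs in most of the hunks below. A sketch assuming the grsecurity API, with a hypothetical helper:

/* a wrapping cursor: overflow is harmless here by design */
static atomic_unchecked_t start_pool = ATOMIC_INIT(0);

static unsigned int next_pool(unsigned int npools)
{
	/* may wrap past INT_MAX without tripping REFCOUNT */
	return atomic_add_return_unchecked(1, &start_pool) % npools;
}
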
41701diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41702index dbadd49..1b7457b 100644
41703--- a/drivers/gpu/drm/udl/udl_fb.c
41704+++ b/drivers/gpu/drm/udl/udl_fb.c
41705@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41706 fb_deferred_io_cleanup(info);
41707 kfree(info->fbdefio);
41708 info->fbdefio = NULL;
41709- info->fbops->fb_mmap = udl_fb_mmap;
41710 }
41711
41712 pr_warn("released /dev/fb%d user=%d count=%d\n",
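
The dropped line restored fb_mmap on the final release. Since this patch constifies fb_ops and places such tables in read-only memory, a runtime store through info->fbops would fault rather than take effect, so the write is removed outright:

/* would now oops under KERNEXEC: fbops points into read-only data */
info->fbops->fb_mmap = udl_fb_mmap;
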
41713diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41714index ad02732..144f5ed 100644
41715--- a/drivers/gpu/drm/via/via_drv.h
41716+++ b/drivers/gpu/drm/via/via_drv.h
41717@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
41718 typedef uint32_t maskarray_t[5];
41719
41720 typedef struct drm_via_irq {
41721- atomic_t irq_received;
41722+ atomic_unchecked_t irq_received;
41723 uint32_t pending_mask;
41724 uint32_t enable_mask;
41725 wait_queue_head_t irq_queue;
41726@@ -75,7 +75,7 @@ typedef struct drm_via_private {
41727 struct timeval last_vblank;
41728 int last_vblank_valid;
41729 unsigned usec_per_vblank;
41730- atomic_t vbl_received;
41731+ atomic_unchecked_t vbl_received;
41732 drm_via_state_t hc_state;
41733 char pci_buf[VIA_PCI_BUF_SIZE];
41734 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41735diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41736index 1319433..a993b0c 100644
41737--- a/drivers/gpu/drm/via/via_irq.c
41738+++ b/drivers/gpu/drm/via/via_irq.c
41739@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41740 if (crtc != 0)
41741 return 0;
41742
41743- return atomic_read(&dev_priv->vbl_received);
41744+ return atomic_read_unchecked(&dev_priv->vbl_received);
41745 }
41746
41747 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41748@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41749
41750 status = VIA_READ(VIA_REG_INTERRUPT);
41751 if (status & VIA_IRQ_VBLANK_PENDING) {
41752- atomic_inc(&dev_priv->vbl_received);
41753- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41754+ atomic_inc_unchecked(&dev_priv->vbl_received);
41755+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41756 do_gettimeofday(&cur_vblank);
41757 if (dev_priv->last_vblank_valid) {
41758 dev_priv->usec_per_vblank =
41759@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41760 dev_priv->last_vblank = cur_vblank;
41761 dev_priv->last_vblank_valid = 1;
41762 }
41763- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41764+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41765 DRM_DEBUG("US per vblank is: %u\n",
41766 dev_priv->usec_per_vblank);
41767 }
41768@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41769
41770 for (i = 0; i < dev_priv->num_irqs; ++i) {
41771 if (status & cur_irq->pending_mask) {
41772- atomic_inc(&cur_irq->irq_received);
41773+ atomic_inc_unchecked(&cur_irq->irq_received);
41774 wake_up(&cur_irq->irq_queue);
41775 handled = 1;
41776 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41777@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41778 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41779 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41780 masks[irq][4]));
41781- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41782+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41783 } else {
41784 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41785 (((cur_irq_sequence =
41786- atomic_read(&cur_irq->irq_received)) -
41787+ atomic_read_unchecked(&cur_irq->irq_received)) -
41788 *sequence) <= (1 << 23)));
41789 }
41790 *sequence = cur_irq_sequence;
41791@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41792 }
41793
41794 for (i = 0; i < dev_priv->num_irqs; ++i) {
41795- atomic_set(&cur_irq->irq_received, 0);
41796+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41797 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41798 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41799 init_waitqueue_head(&cur_irq->irq_queue);
41800@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41801 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41802 case VIA_IRQ_RELATIVE:
41803 irqwait->request.sequence +=
41804- atomic_read(&cur_irq->irq_received);
41805+ atomic_read_unchecked(&cur_irq->irq_received);
41806 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41807 case VIA_IRQ_ABSOLUTE:
41808 break;
41809diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41810index 0783155..b29e18e 100644
41811--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41812+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41813@@ -437,7 +437,7 @@ struct vmw_private {
41814 * Fencing and IRQs.
41815 */
41816
41817- atomic_t marker_seq;
41818+ atomic_unchecked_t marker_seq;
41819 wait_queue_head_t fence_queue;
41820 wait_queue_head_t fifo_queue;
41821 int fence_queue_waiters; /* Protected by hw_mutex */
41822diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41823index 6ccd993..618d592 100644
41824--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41825+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41826@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41827 (unsigned int) min,
41828 (unsigned int) fifo->capabilities);
41829
41830- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41831+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41832 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41833 vmw_marker_queue_init(&fifo->marker_queue);
41834 return vmw_fifo_send_fence(dev_priv, &dummy);
41835@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41836 if (reserveable)
41837 iowrite32(bytes, fifo_mem +
41838 SVGA_FIFO_RESERVED);
41839- return fifo_mem + (next_cmd >> 2);
41840+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41841 } else {
41842 need_bounce = true;
41843 }
41844@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41845
41846 fm = vmw_fifo_reserve(dev_priv, bytes);
41847 if (unlikely(fm == NULL)) {
41848- *seqno = atomic_read(&dev_priv->marker_seq);
41849+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41850 ret = -ENOMEM;
41851 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41852 false, 3*HZ);
41853@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41854 }
41855
41856 do {
41857- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41858+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41859 } while (*seqno == 0);
41860
41861 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41862diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41863index b1273e8..9c274fd 100644
41864--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41865+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41866@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41867 }
41868
41869 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41870- vmw_gmrid_man_init,
41871- vmw_gmrid_man_takedown,
41872- vmw_gmrid_man_get_node,
41873- vmw_gmrid_man_put_node,
41874- vmw_gmrid_man_debug
41875+ .init = vmw_gmrid_man_init,
41876+ .takedown = vmw_gmrid_man_takedown,
41877+ .get_node = vmw_gmrid_man_get_node,
41878+ .put_node = vmw_gmrid_man_put_node,
41879+ .debug = vmw_gmrid_man_debug
41880 };
41881diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41882index 47b7094..698ba09 100644
41883--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41884+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41885@@ -236,7 +236,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41886 int ret;
41887
41888 num_clips = arg->num_clips;
41889- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41890+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41891
41892 if (unlikely(num_clips == 0))
41893 return 0;
41894@@ -320,7 +320,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41895 int ret;
41896
41897 num_clips = arg->num_clips;
41898- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41899+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41900
41901 if (unlikely(num_clips == 0))
41902 return 0;
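
arg->clips_ptr arrives from userspace as a plain 64-bit integer; casting it straight to a kernel pointer discards the address-space tag that sparse and PaX's user/kernel separation key on. Adding __user keeps the pointer marked untrusted until copy_from_user() actually moves the data. Roughly, with a hypothetical kernel-side destination:

struct drm_vmw_rect __user *clips_ptr;
struct drm_vmw_rect *clips;	/* hypothetical kernel-side array */

clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
if (copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)))
	return -EFAULT;
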
41903diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41904index 0c42376..6febe77 100644
41905--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41906+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41907@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41908 * emitted. Then the fence is stale and signaled.
41909 */
41910
41911- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41912+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41913 > VMW_FENCE_WRAP);
41914
41915 return ret;
41916@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41917
41918 if (fifo_idle)
41919 down_read(&fifo_state->rwsem);
41920- signal_seq = atomic_read(&dev_priv->marker_seq);
41921+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41922 ret = 0;
41923
41924 for (;;) {
41925diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41926index 8a8725c2..afed796 100644
41927--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41928+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41929@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41930 while (!vmw_lag_lt(queue, us)) {
41931 spin_lock(&queue->lock);
41932 if (list_empty(&queue->head))
41933- seqno = atomic_read(&dev_priv->marker_seq);
41934+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41935 else {
41936 marker = list_first_entry(&queue->head,
41937 struct vmw_marker, head);
41938diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41939index ec0ae2d..dc0780b 100644
41940--- a/drivers/gpu/vga/vga_switcheroo.c
41941+++ b/drivers/gpu/vga/vga_switcheroo.c
41942@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41943
41944 /* this version is for the case where the power switch is separate
41945 to the device being powered down. */
41946-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41947+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41948 {
41949 /* copy over all the bus versions */
41950 if (dev->bus && dev->bus->pm) {
41951@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41952 return ret;
41953 }
41954
41955-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41956+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41957 {
41958 /* copy over all the bus versions */
41959 if (dev->bus && dev->bus->pm) {
41960diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41961index 8a5384c..cf63c18 100644
41962--- a/drivers/hid/hid-core.c
41963+++ b/drivers/hid/hid-core.c
41964@@ -2422,7 +2422,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41965
41966 int hid_add_device(struct hid_device *hdev)
41967 {
41968- static atomic_t id = ATOMIC_INIT(0);
41969+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41970 int ret;
41971
41972 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41973@@ -2456,7 +2456,7 @@ int hid_add_device(struct hid_device *hdev)
41974 /* XXX hack, any other cleaner solution after the driver core
41975 * is converted to allow more than 20 bytes as the device name? */
41976 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41977- hdev->vendor, hdev->product, atomic_inc_return(&id));
41978+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41979
41980 hid_debug_register(hdev, dev_name(&hdev->dev));
41981 ret = device_add(&hdev->dev);
41982diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41983index c13fb5b..55a3802 100644
41984--- a/drivers/hid/hid-wiimote-debug.c
41985+++ b/drivers/hid/hid-wiimote-debug.c
41986@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41987 else if (size == 0)
41988 return -EIO;
41989
41990- if (copy_to_user(u, buf, size))
41991+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41992 return -EFAULT;
41993
41994 *off += size;
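
wiidebug_eeprom_read() copies out of a fixed on-stack buffer; the added bound refuses any user-supplied length larger than that buffer, closing a potential stack over-read to userspace. The general pattern, sketched with a hypothetical reader and buffer size:

static ssize_t dbg_read(struct file *f, char __user *u, size_t size,
			loff_t *off)
{
	char buf[16];		/* hypothetical fixed scratch buffer */

	/* ... fill buf and compute size ... */
	if (size > sizeof(buf) || copy_to_user(u, buf, size))
		return -EFAULT;	/* never copy past the source buffer */
	*off += size;
	return size;
}
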
41995diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
41996index cedc6da..2c3da2a 100644
41997--- a/drivers/hid/uhid.c
41998+++ b/drivers/hid/uhid.c
41999@@ -47,7 +47,7 @@ struct uhid_device {
42000 struct mutex report_lock;
42001 wait_queue_head_t report_wait;
42002 atomic_t report_done;
42003- atomic_t report_id;
42004+ atomic_unchecked_t report_id;
42005 struct uhid_event report_buf;
42006 };
42007
42008@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
42009
42010 spin_lock_irqsave(&uhid->qlock, flags);
42011 ev->type = UHID_FEATURE;
42012- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
42013+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
42014 ev->u.feature.rnum = rnum;
42015 ev->u.feature.rtype = report_type;
42016
42017@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
42018 spin_lock_irqsave(&uhid->qlock, flags);
42019
42020 /* id for old report; drop it silently */
42021- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
42022+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
42023 goto unlock;
42024 if (atomic_read(&uhid->report_done))
42025 goto unlock;
42026diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42027index 69ea36f..8dbf4bb 100644
42028--- a/drivers/hv/channel.c
42029+++ b/drivers/hv/channel.c
42030@@ -364,8 +364,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42031 int ret = 0;
42032 int t;
42033
42034- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
42035- atomic_inc(&vmbus_connection.next_gpadl_handle);
42036+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
42037+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
42038
42039 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42040 if (ret)
42041diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42042index bcb4950..61dba6c 100644
42043--- a/drivers/hv/hv.c
42044+++ b/drivers/hv/hv.c
42045@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42046 u64 output_address = (output) ? virt_to_phys(output) : 0;
42047 u32 output_address_hi = output_address >> 32;
42048 u32 output_address_lo = output_address & 0xFFFFFFFF;
42049- void *hypercall_page = hv_context.hypercall_page;
42050+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42051
42052 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42053 "=a"(hv_status_lo) : "d" (control_hi),
42054@@ -154,7 +154,7 @@ int hv_init(void)
42055 /* See if the hypercall page is already set */
42056 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42057
42058- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42059+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42060
42061 if (!virtaddr)
42062 goto cleanup;
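
The hypercall page is code the guest jumps into but never writes — the hypervisor populates it once HV_X64_MSR_HYPERCALL is armed — so a read-execute mapping suffices, and the writable-and-executable PAGE_KERNEL_EXEC mapping needlessly violated W^X. The companion ktva_ktla() change routes the indirect call through PaX's executable kernel-text alias of the page. In short:

/* read-execute only: the guest side never stores to this page */
virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
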
42063diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42064index 7e17a54..a50a33d 100644
42065--- a/drivers/hv/hv_balloon.c
42066+++ b/drivers/hv/hv_balloon.c
42067@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42068
42069 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42070 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42071-static atomic_t trans_id = ATOMIC_INIT(0);
42072+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42073
42074 static int dm_ring_size = (5 * PAGE_SIZE);
42075
42076@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
42077 pr_info("Memory hot add failed\n");
42078
42079 dm->state = DM_INITIALIZED;
42080- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42081+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42082 vmbus_sendpacket(dm->dev->channel, &resp,
42083 sizeof(struct dm_hot_add_response),
42084 (unsigned long)NULL,
42085@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
42086 memset(&status, 0, sizeof(struct dm_status));
42087 status.hdr.type = DM_STATUS_REPORT;
42088 status.hdr.size = sizeof(struct dm_status);
42089- status.hdr.trans_id = atomic_inc_return(&trans_id);
42090+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42091
42092 /*
42093 * The host expects the guest to report free memory.
42094@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
42095 * send the status. This can happen if we were interrupted
42096 * after we picked our transaction ID.
42097 */
42098- if (status.hdr.trans_id != atomic_read(&trans_id))
42099+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42100 return;
42101
42102 vmbus_sendpacket(dm->dev->channel, &status,
42103@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
42104 */
42105
42106 do {
42107- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42108+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42109 ret = vmbus_sendpacket(dm_device.dev->channel,
42110 bl_resp,
42111 bl_resp->hdr.size,
42112@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42113
42114 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42115 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42116- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42117+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42118 resp.hdr.size = sizeof(struct dm_unballoon_response);
42119
42120 vmbus_sendpacket(dm_device.dev->channel, &resp,
42121@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42122 memset(&version_req, 0, sizeof(struct dm_version_request));
42123 version_req.hdr.type = DM_VERSION_REQUEST;
42124 version_req.hdr.size = sizeof(struct dm_version_request);
42125- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42126+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42127 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42128 version_req.is_last_attempt = 1;
42129
42130@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
42131 memset(&version_req, 0, sizeof(struct dm_version_request));
42132 version_req.hdr.type = DM_VERSION_REQUEST;
42133 version_req.hdr.size = sizeof(struct dm_version_request);
42134- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42135+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42136 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42137 version_req.is_last_attempt = 0;
42138
42139@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
42140 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42141 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42142 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42143- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42144+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42145
42146 cap_msg.caps.cap_bits.balloon = 1;
42147 cap_msg.caps.cap_bits.hot_add = 1;
42148diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42149index e055176..c22ff1f 100644
42150--- a/drivers/hv/hyperv_vmbus.h
42151+++ b/drivers/hv/hyperv_vmbus.h
42152@@ -602,7 +602,7 @@ enum vmbus_connect_state {
42153 struct vmbus_connection {
42154 enum vmbus_connect_state conn_state;
42155
42156- atomic_t next_gpadl_handle;
42157+ atomic_unchecked_t next_gpadl_handle;
42158
42159 /*
42160 * Represents channel interrupts. Each bit position represents a
42161diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42162index 077bb1b..d433d74 100644
42163--- a/drivers/hv/vmbus_drv.c
42164+++ b/drivers/hv/vmbus_drv.c
42165@@ -844,10 +844,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42166 {
42167 int ret = 0;
42168
42169- static atomic_t device_num = ATOMIC_INIT(0);
42170+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42171
42172 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42173- atomic_inc_return(&device_num));
42174+ atomic_inc_return_unchecked(&device_num));
42175
42176 child_device_obj->device.bus = &hv_bus;
42177 child_device_obj->device.parent = &hv_acpi_dev->dev;
42178diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42179index 579bdf9..75118b5 100644
42180--- a/drivers/hwmon/acpi_power_meter.c
42181+++ b/drivers/hwmon/acpi_power_meter.c
42182@@ -116,7 +116,7 @@ struct sensor_template {
42183 struct device_attribute *devattr,
42184 const char *buf, size_t count);
42185 int index;
42186-};
42187+} __do_const;
42188
42189 /* Averaging interval */
42190 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42191@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42192 struct sensor_template *attrs)
42193 {
42194 struct device *dev = &resource->acpi_dev->dev;
42195- struct sensor_device_attribute *sensors =
42196+ sensor_device_attribute_no_const *sensors =
42197 &resource->sensors[resource->num_sensors];
42198 int res = 0;
42199
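
The constify plugin makes ops-like structures const by default. __do_const opts a struct into that treatment even though it carries plain data (index here), while the *_no_const typedefs (sensor_device_attribute_no_const and friends, used across the hwmon changes below) give the few sites that legitimately build attributes at runtime a writable variant of the otherwise constified type. A hedged illustration of the two annotations, with a made-up struct name:

struct handler_template {
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	int index;
} __do_const;				/* constified despite the data member */

typedef struct device_attribute __no_const device_attribute_no_const;

static device_attribute_no_const runtime_attr;	/* writable instance */
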
42200diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42201index 3288f13..71cfb4e 100644
42202--- a/drivers/hwmon/applesmc.c
42203+++ b/drivers/hwmon/applesmc.c
42204@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42205 {
42206 struct applesmc_node_group *grp;
42207 struct applesmc_dev_attr *node;
42208- struct attribute *attr;
42209+ attribute_no_const *attr;
42210 int ret, i;
42211
42212 for (grp = groups; grp->format; grp++) {
42213diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42214index ae208f6..48b6c5b 100644
42215--- a/drivers/hwmon/asus_atk0110.c
42216+++ b/drivers/hwmon/asus_atk0110.c
42217@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42218 struct atk_sensor_data {
42219 struct list_head list;
42220 struct atk_data *data;
42221- struct device_attribute label_attr;
42222- struct device_attribute input_attr;
42223- struct device_attribute limit1_attr;
42224- struct device_attribute limit2_attr;
42225+ device_attribute_no_const label_attr;
42226+ device_attribute_no_const input_attr;
42227+ device_attribute_no_const limit1_attr;
42228+ device_attribute_no_const limit2_attr;
42229 char label_attr_name[ATTR_NAME_SIZE];
42230 char input_attr_name[ATTR_NAME_SIZE];
42231 char limit1_attr_name[ATTR_NAME_SIZE];
42232@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42233 static struct device_attribute atk_name_attr =
42234 __ATTR(name, 0444, atk_name_show, NULL);
42235
42236-static void atk_init_attribute(struct device_attribute *attr, char *name,
42237+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42238 sysfs_show_func show)
42239 {
42240 sysfs_attr_init(&attr->attr);
42241diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42242index 1599310..cd9525c 100644
42243--- a/drivers/hwmon/coretemp.c
42244+++ b/drivers/hwmon/coretemp.c
42245@@ -823,7 +823,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42246 return NOTIFY_OK;
42247 }
42248
42249-static struct notifier_block coretemp_cpu_notifier __refdata = {
42250+static struct notifier_block coretemp_cpu_notifier = {
42251 .notifier_call = coretemp_cpu_callback,
42252 };
42253
42254diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42255index 632f1dc..57e6a58 100644
42256--- a/drivers/hwmon/ibmaem.c
42257+++ b/drivers/hwmon/ibmaem.c
42258@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
42259 struct aem_rw_sensor_template *rw)
42260 {
42261 struct device *dev = &data->pdev->dev;
42262- struct sensor_device_attribute *sensors = data->sensors;
42263+ sensor_device_attribute_no_const *sensors = data->sensors;
42264 int err;
42265
42266 /* Set up read-only sensors */
42267diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42268index 708081b..fe2d4ab 100644
42269--- a/drivers/hwmon/iio_hwmon.c
42270+++ b/drivers/hwmon/iio_hwmon.c
42271@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42272 {
42273 struct device *dev = &pdev->dev;
42274 struct iio_hwmon_state *st;
42275- struct sensor_device_attribute *a;
42276+ sensor_device_attribute_no_const *a;
42277 int ret, i;
42278 int in_i = 1, temp_i = 1, curr_i = 1;
42279 enum iio_chan_type type;
42280diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42281index 38d5a63..cf2c2ea 100644
42282--- a/drivers/hwmon/nct6775.c
42283+++ b/drivers/hwmon/nct6775.c
42284@@ -944,10 +944,10 @@ static struct attribute_group *
42285 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42286 int repeat)
42287 {
42288- struct attribute_group *group;
42289+ attribute_group_no_const *group;
42290 struct sensor_device_attr_u *su;
42291- struct sensor_device_attribute *a;
42292- struct sensor_device_attribute_2 *a2;
42293+ sensor_device_attribute_no_const *a;
42294+ sensor_device_attribute_2_no_const *a2;
42295 struct attribute **attrs;
42296 struct sensor_device_template **t;
42297 int i, count;
42298diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42299index 291d11f..3f0dbbd 100644
42300--- a/drivers/hwmon/pmbus/pmbus_core.c
42301+++ b/drivers/hwmon/pmbus/pmbus_core.c
42302@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42303 return 0;
42304 }
42305
42306-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42307+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42308 const char *name,
42309 umode_t mode,
42310 ssize_t (*show)(struct device *dev,
42311@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42312 dev_attr->store = store;
42313 }
42314
42315-static void pmbus_attr_init(struct sensor_device_attribute *a,
42316+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42317 const char *name,
42318 umode_t mode,
42319 ssize_t (*show)(struct device *dev,
42320@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42321 u16 reg, u8 mask)
42322 {
42323 struct pmbus_boolean *boolean;
42324- struct sensor_device_attribute *a;
42325+ sensor_device_attribute_no_const *a;
42326
42327 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42328 if (!boolean)
42329@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42330 bool update, bool readonly)
42331 {
42332 struct pmbus_sensor *sensor;
42333- struct device_attribute *a;
42334+ device_attribute_no_const *a;
42335
42336 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42337 if (!sensor)
42338@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42339 const char *lstring, int index)
42340 {
42341 struct pmbus_label *label;
42342- struct device_attribute *a;
42343+ device_attribute_no_const *a;
42344
42345 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42346 if (!label)
42347diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42348index 97cd45a..ac54d8b 100644
42349--- a/drivers/hwmon/sht15.c
42350+++ b/drivers/hwmon/sht15.c
42351@@ -169,7 +169,7 @@ struct sht15_data {
42352 int supply_uv;
42353 bool supply_uv_valid;
42354 struct work_struct update_supply_work;
42355- atomic_t interrupt_handled;
42356+ atomic_unchecked_t interrupt_handled;
42357 };
42358
42359 /**
42360@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42361 ret = gpio_direction_input(data->pdata->gpio_data);
42362 if (ret)
42363 return ret;
42364- atomic_set(&data->interrupt_handled, 0);
42365+ atomic_set_unchecked(&data->interrupt_handled, 0);
42366
42367 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42368 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42369 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42370 /* Only relevant if the interrupt hasn't occurred. */
42371- if (!atomic_read(&data->interrupt_handled))
42372+ if (!atomic_read_unchecked(&data->interrupt_handled))
42373 schedule_work(&data->read_work);
42374 }
42375 ret = wait_event_timeout(data->wait_queue,
42376@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42377
42378 /* First disable the interrupt */
42379 disable_irq_nosync(irq);
42380- atomic_inc(&data->interrupt_handled);
42381+ atomic_inc_unchecked(&data->interrupt_handled);
42382 /* Then schedule a reading work struct */
42383 if (data->state != SHT15_READING_NOTHING)
42384 schedule_work(&data->read_work);
42385@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42386 * If not, then start the interrupt again - care here as could
42387 * have gone low in meantime so verify it hasn't!
42388 */
42389- atomic_set(&data->interrupt_handled, 0);
42390+ atomic_set_unchecked(&data->interrupt_handled, 0);
42391 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42392 /* If still not occurred or another handler was scheduled */
42393 if (gpio_get_value(data->pdata->gpio_data)
42394- || atomic_read(&data->interrupt_handled))
42395+ || atomic_read_unchecked(&data->interrupt_handled))
42396 return;
42397 }
42398
42399diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42400index 38944e9..ae9e5ed 100644
42401--- a/drivers/hwmon/via-cputemp.c
42402+++ b/drivers/hwmon/via-cputemp.c
42403@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42404 return NOTIFY_OK;
42405 }
42406
42407-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42408+static struct notifier_block via_cputemp_cpu_notifier = {
42409 .notifier_call = via_cputemp_cpu_callback,
42410 };
42411
42412diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42413index 41fc683..a39cfea 100644
42414--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42415+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42416@@ -43,7 +43,7 @@
42417 extern struct i2c_adapter amd756_smbus;
42418
42419 static struct i2c_adapter *s4882_adapter;
42420-static struct i2c_algorithm *s4882_algo;
42421+static i2c_algorithm_no_const *s4882_algo;
42422
42423 /* Wrapper access functions for multiplexed SMBus */
42424 static DEFINE_MUTEX(amd756_lock);
42425diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42426index 721f7eb..0fd2a09 100644
42427--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42428+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42429@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42430 /* usb layer */
42431
42432 /* Send command to device, and get response. */
42433-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42434+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42435 {
42436 int ret = 0;
42437 int actual;
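
__intentional_overflow() is an annotation consumed by the size_overflow gcc plugin, which otherwise instruments integer expressions feeding allocation and copy sizes and traps on overflow; the -1 argument, as used throughout this patch, appears to mark overflow in the function's return value as deliberate so it is left uninstrumented. A hypothetical use:

/* wrap-around here is the point, not a bug: tell the plugin so */
static unsigned int __intentional_overflow(-1)
ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	return (tail - head - 1) & (size - 1);
}
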
42438diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42439index b170bdf..3c76427 100644
42440--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42441+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42442@@ -41,7 +41,7 @@
42443 extern struct i2c_adapter *nforce2_smbus;
42444
42445 static struct i2c_adapter *s4985_adapter;
42446-static struct i2c_algorithm *s4985_algo;
42447+static i2c_algorithm_no_const *s4985_algo;
42448
42449 /* Wrapper access functions for multiplexed SMBus */
42450 static DEFINE_MUTEX(nforce2_lock);
42451diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42452index 80b47e8..1a6040d9 100644
42453--- a/drivers/i2c/i2c-dev.c
42454+++ b/drivers/i2c/i2c-dev.c
42455@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42456 break;
42457 }
42458
42459- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42460+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42461 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42462 if (IS_ERR(rdwr_pa[i].buf)) {
42463 res = PTR_ERR(rdwr_pa[i].buf);
42464diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42465index 0b510ba..4fbb5085 100644
42466--- a/drivers/ide/ide-cd.c
42467+++ b/drivers/ide/ide-cd.c
42468@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42469 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42470 if ((unsigned long)buf & alignment
42471 || blk_rq_bytes(rq) & q->dma_pad_mask
42472- || object_is_on_stack(buf))
42473+ || object_starts_on_stack(buf))
42474 drive->dma = 0;
42475 }
42476 }
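
ide-cd falls back to PIO when the request buffer lives on the kernel stack, since stack memory is not safe to DMA into. The renamed helper states more precisely what is tested — the buffer's start address — and under grsecurity is roughly:

/* sketch: does the object begin inside current's stack? */
static inline int object_starts_on_stack(const void *obj)
{
	const void *stack = task_stack_page(current);

	return obj >= stack && obj < stack + THREAD_SIZE;
}
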
42477diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42478index acc911a..8700c3c 100644
42479--- a/drivers/iio/industrialio-core.c
42480+++ b/drivers/iio/industrialio-core.c
42481@@ -527,7 +527,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42482 }
42483
42484 static
42485-int __iio_device_attr_init(struct device_attribute *dev_attr,
42486+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42487 const char *postfix,
42488 struct iio_chan_spec const *chan,
42489 ssize_t (*readfunc)(struct device *dev,
42490diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42491index c323917..6ddea8b 100644
42492--- a/drivers/infiniband/core/cm.c
42493+++ b/drivers/infiniband/core/cm.c
42494@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42495
42496 struct cm_counter_group {
42497 struct kobject obj;
42498- atomic_long_t counter[CM_ATTR_COUNT];
42499+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42500 };
42501
42502 struct cm_counter_attribute {
42503@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42504 struct ib_mad_send_buf *msg = NULL;
42505 int ret;
42506
42507- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42508+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42509 counter[CM_REQ_COUNTER]);
42510
42511 /* Quick state check to discard duplicate REQs. */
42512@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42513 if (!cm_id_priv)
42514 return;
42515
42516- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42517+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42518 counter[CM_REP_COUNTER]);
42519 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42520 if (ret)
42521@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42522 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42523 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42524 spin_unlock_irq(&cm_id_priv->lock);
42525- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42526+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42527 counter[CM_RTU_COUNTER]);
42528 goto out;
42529 }
42530@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42531 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42532 dreq_msg->local_comm_id);
42533 if (!cm_id_priv) {
42534- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42535+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42536 counter[CM_DREQ_COUNTER]);
42537 cm_issue_drep(work->port, work->mad_recv_wc);
42538 return -EINVAL;
42539@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42540 case IB_CM_MRA_REP_RCVD:
42541 break;
42542 case IB_CM_TIMEWAIT:
42543- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42544+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42545 counter[CM_DREQ_COUNTER]);
42546 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42547 goto unlock;
42548@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42549 cm_free_msg(msg);
42550 goto deref;
42551 case IB_CM_DREQ_RCVD:
42552- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42553+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42554 counter[CM_DREQ_COUNTER]);
42555 goto unlock;
42556 default:
42557@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42558 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42559 cm_id_priv->msg, timeout)) {
42560 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42561- atomic_long_inc(&work->port->
42562+ atomic_long_inc_unchecked(&work->port->
42563 counter_group[CM_RECV_DUPLICATES].
42564 counter[CM_MRA_COUNTER]);
42565 goto out;
42566@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42567 break;
42568 case IB_CM_MRA_REQ_RCVD:
42569 case IB_CM_MRA_REP_RCVD:
42570- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42571+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42572 counter[CM_MRA_COUNTER]);
42573 /* fall through */
42574 default:
42575@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42576 case IB_CM_LAP_IDLE:
42577 break;
42578 case IB_CM_MRA_LAP_SENT:
42579- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42580+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42581 counter[CM_LAP_COUNTER]);
42582 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42583 goto unlock;
42584@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42585 cm_free_msg(msg);
42586 goto deref;
42587 case IB_CM_LAP_RCVD:
42588- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42589+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42590 counter[CM_LAP_COUNTER]);
42591 goto unlock;
42592 default:
42593@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42594 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42595 if (cur_cm_id_priv) {
42596 spin_unlock_irq(&cm.lock);
42597- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42598+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42599 counter[CM_SIDR_REQ_COUNTER]);
42600 goto out; /* Duplicate message. */
42601 }
42602@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42603 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42604 msg->retries = 1;
42605
42606- atomic_long_add(1 + msg->retries,
42607+ atomic_long_add_unchecked(1 + msg->retries,
42608 &port->counter_group[CM_XMIT].counter[attr_index]);
42609 if (msg->retries)
42610- atomic_long_add(msg->retries,
42611+ atomic_long_add_unchecked(msg->retries,
42612 &port->counter_group[CM_XMIT_RETRIES].
42613 counter[attr_index]);
42614
42615@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42616 }
42617
42618 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42619- atomic_long_inc(&port->counter_group[CM_RECV].
42620+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42621 counter[attr_id - CM_ATTR_ID_OFFSET]);
42622
42623 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42624@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42625 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42626
42627 return sprintf(buf, "%ld\n",
42628- atomic_long_read(&group->counter[cm_attr->index]));
42629+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42630 }
42631
42632 static const struct sysfs_ops cm_counter_ops = {
42633diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42634index 9f5ad7c..588cd84 100644
42635--- a/drivers/infiniband/core/fmr_pool.c
42636+++ b/drivers/infiniband/core/fmr_pool.c
42637@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42638
42639 struct task_struct *thread;
42640
42641- atomic_t req_ser;
42642- atomic_t flush_ser;
42643+ atomic_unchecked_t req_ser;
42644+ atomic_unchecked_t flush_ser;
42645
42646 wait_queue_head_t force_wait;
42647 };
42648@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42649 struct ib_fmr_pool *pool = pool_ptr;
42650
42651 do {
42652- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42653+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42654 ib_fmr_batch_release(pool);
42655
42656- atomic_inc(&pool->flush_ser);
42657+ atomic_inc_unchecked(&pool->flush_ser);
42658 wake_up_interruptible(&pool->force_wait);
42659
42660 if (pool->flush_function)
42661@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42662 }
42663
42664 set_current_state(TASK_INTERRUPTIBLE);
42665- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42666+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42667 !kthread_should_stop())
42668 schedule();
42669 __set_current_state(TASK_RUNNING);
42670@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42671 pool->dirty_watermark = params->dirty_watermark;
42672 pool->dirty_len = 0;
42673 spin_lock_init(&pool->pool_lock);
42674- atomic_set(&pool->req_ser, 0);
42675- atomic_set(&pool->flush_ser, 0);
42676+ atomic_set_unchecked(&pool->req_ser, 0);
42677+ atomic_set_unchecked(&pool->flush_ser, 0);
42678 init_waitqueue_head(&pool->force_wait);
42679
42680 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42681@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42682 }
42683 spin_unlock_irq(&pool->pool_lock);
42684
42685- serial = atomic_inc_return(&pool->req_ser);
42686+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42687 wake_up_process(pool->thread);
42688
42689 if (wait_event_interruptible(pool->force_wait,
42690- atomic_read(&pool->flush_ser) - serial >= 0))
42691+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42692 return -EINTR;
42693
42694 return 0;
42695@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42696 } else {
42697 list_add_tail(&fmr->list, &pool->dirty_list);
42698 if (++pool->dirty_len >= pool->dirty_watermark) {
42699- atomic_inc(&pool->req_ser);
42700+ atomic_inc_unchecked(&pool->req_ser);
42701 wake_up_process(pool->thread);
42702 }
42703 }
42704diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42705index 41b1195..27971a0 100644
42706--- a/drivers/infiniband/hw/cxgb4/mem.c
42707+++ b/drivers/infiniband/hw/cxgb4/mem.c
42708@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42709 int err;
42710 struct fw_ri_tpte tpt;
42711 u32 stag_idx;
42712- static atomic_t key;
42713+ static atomic_unchecked_t key;
42714
42715 if (c4iw_fatal_error(rdev))
42716 return -EIO;
42717@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42718 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42719 rdev->stats.stag.max = rdev->stats.stag.cur;
42720 mutex_unlock(&rdev->stats.lock);
42721- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42722+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42723 }
42724 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42725 __func__, stag_state, type, pdid, stag_idx);
42726diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
42727index 644c2c7..ecf0879 100644
42728--- a/drivers/infiniband/hw/ipath/ipath_dma.c
42729+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
42730@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
42731 }
42732
42733 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
42734- ipath_mapping_error,
42735- ipath_dma_map_single,
42736- ipath_dma_unmap_single,
42737- ipath_dma_map_page,
42738- ipath_dma_unmap_page,
42739- ipath_map_sg,
42740- ipath_unmap_sg,
42741- ipath_sg_dma_address,
42742- ipath_sg_dma_len,
42743- ipath_sync_single_for_cpu,
42744- ipath_sync_single_for_device,
42745- ipath_dma_alloc_coherent,
42746- ipath_dma_free_coherent
42747+ .mapping_error = ipath_mapping_error,
42748+ .map_single = ipath_dma_map_single,
42749+ .unmap_single = ipath_dma_unmap_single,
42750+ .map_page = ipath_dma_map_page,
42751+ .unmap_page = ipath_dma_unmap_page,
42752+ .map_sg = ipath_map_sg,
42753+ .unmap_sg = ipath_unmap_sg,
42754+ .dma_address = ipath_sg_dma_address,
42755+ .dma_len = ipath_sg_dma_len,
42756+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
42757+ .sync_single_for_device = ipath_sync_single_for_device,
42758+ .alloc_coherent = ipath_dma_alloc_coherent,
42759+ .free_coherent = ipath_dma_free_coherent
42760 };
42761diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42762index 79b3dbc..96e5fcc 100644
42763--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42764+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42765@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42766 struct ib_atomic_eth *ateth;
42767 struct ipath_ack_entry *e;
42768 u64 vaddr;
42769- atomic64_t *maddr;
42770+ atomic64_unchecked_t *maddr;
42771 u64 sdata;
42772 u32 rkey;
42773 u8 next;
42774@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42775 IB_ACCESS_REMOTE_ATOMIC)))
42776 goto nack_acc_unlck;
42777 /* Perform atomic OP and save result. */
42778- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42779+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42780 sdata = be64_to_cpu(ateth->swap_data);
42781 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42782 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42783- (u64) atomic64_add_return(sdata, maddr) - sdata :
42784+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42785 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42786 be64_to_cpu(ateth->compare_data),
42787 sdata);
42788diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42789index 1f95bba..9530f87 100644
42790--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42791+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42792@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42793 unsigned long flags;
42794 struct ib_wc wc;
42795 u64 sdata;
42796- atomic64_t *maddr;
42797+ atomic64_unchecked_t *maddr;
42798 enum ib_wc_status send_status;
42799
42800 /*
42801@@ -382,11 +382,11 @@ again:
42802 IB_ACCESS_REMOTE_ATOMIC)))
42803 goto acc_err;
42804 /* Perform atomic OP and save result. */
42805- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42806+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42807 sdata = wqe->wr.wr.atomic.compare_add;
42808 *(u64 *) sqp->s_sge.sge.vaddr =
42809 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42810- (u64) atomic64_add_return(sdata, maddr) - sdata :
42811+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42812 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42813 sdata, wqe->wr.wr.atomic.swap);
42814 goto send_comp;
42815diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42816index f2a3f48..673ec79 100644
42817--- a/drivers/infiniband/hw/mlx4/mad.c
42818+++ b/drivers/infiniband/hw/mlx4/mad.c
42819@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42820
42821 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42822 {
42823- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42824+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42825 cpu_to_be64(0xff00000000000000LL);
42826 }
42827
42828diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42829index 25b2cdf..099ff97 100644
42830--- a/drivers/infiniband/hw/mlx4/mcg.c
42831+++ b/drivers/infiniband/hw/mlx4/mcg.c
42832@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42833 {
42834 char name[20];
42835
42836- atomic_set(&ctx->tid, 0);
42837+ atomic_set_unchecked(&ctx->tid, 0);
42838 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42839 ctx->mcg_wq = create_singlethread_workqueue(name);
42840 if (!ctx->mcg_wq)
42841diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42842index a230683..3723f2d 100644
42843--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42844+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42845@@ -408,7 +408,7 @@ struct mlx4_ib_demux_ctx {
42846 struct list_head mcg_mgid0_list;
42847 struct workqueue_struct *mcg_wq;
42848 struct mlx4_ib_demux_pv_ctx **tun;
42849- atomic_t tid;
42850+ atomic_unchecked_t tid;
42851 int flushing; /* flushing the work queue */
42852 };
42853
42854diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42855index 9d3e5c1..6f166df 100644
42856--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42857+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42858@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42859 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42860 }
42861
42862-int mthca_QUERY_FW(struct mthca_dev *dev)
42863+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42864 {
42865 struct mthca_mailbox *mailbox;
42866 u32 *outbox;
42867@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42868 CMD_TIME_CLASS_B);
42869 }
42870
42871-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42872+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42873 int num_mtt)
42874 {
42875 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42876@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42877 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42878 }
42879
42880-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42881+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42882 int eq_num)
42883 {
42884 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42885@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42886 CMD_TIME_CLASS_B);
42887 }
42888
42889-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42890+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42891 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42892 void *in_mad, void *response_mad)
42893 {
42894diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42895index 87897b9..7e79542 100644
42896--- a/drivers/infiniband/hw/mthca/mthca_main.c
42897+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42898@@ -692,7 +692,7 @@ err_close:
42899 return err;
42900 }
42901
42902-static int mthca_setup_hca(struct mthca_dev *dev)
42903+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42904 {
42905 int err;
42906
42907diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42908index ed9a989..6aa5dc2 100644
42909--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42910+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42911@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42912 * through the bitmaps)
42913 */
42914
42915-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42916+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42917 {
42918 int o;
42919 int m;
42920@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42921 return key;
42922 }
42923
42924-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42925+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42926 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42927 {
42928 struct mthca_mailbox *mailbox;
42929@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42930 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42931 }
42932
42933-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42934+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42935 u64 *buffer_list, int buffer_size_shift,
42936 int list_len, u64 iova, u64 total_size,
42937 u32 access, struct mthca_mr *mr)
42938diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42939index 42dde06..1257310 100644
42940--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42941+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42942@@ -764,7 +764,7 @@ unlock:
42943 return 0;
42944 }
42945
42946-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42947+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42948 {
42949 struct mthca_dev *dev = to_mdev(ibcq->device);
42950 struct mthca_cq *cq = to_mcq(ibcq);
42951diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42952index 353c7b0..c6ce921 100644
42953--- a/drivers/infiniband/hw/nes/nes.c
42954+++ b/drivers/infiniband/hw/nes/nes.c
42955@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42956 LIST_HEAD(nes_adapter_list);
42957 static LIST_HEAD(nes_dev_list);
42958
42959-atomic_t qps_destroyed;
42960+atomic_unchecked_t qps_destroyed;
42961
42962 static unsigned int ee_flsh_adapter;
42963 static unsigned int sysfs_nonidx_addr;
42964@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42965 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42966 struct nes_adapter *nesadapter = nesdev->nesadapter;
42967
42968- atomic_inc(&qps_destroyed);
42969+ atomic_inc_unchecked(&qps_destroyed);
42970
42971 /* Free the control structures */
42972
42973diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42974index 33cc589..3bd6538 100644
42975--- a/drivers/infiniband/hw/nes/nes.h
42976+++ b/drivers/infiniband/hw/nes/nes.h
42977@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
42978 extern unsigned int wqm_quanta;
42979 extern struct list_head nes_adapter_list;
42980
42981-extern atomic_t cm_connects;
42982-extern atomic_t cm_accepts;
42983-extern atomic_t cm_disconnects;
42984-extern atomic_t cm_closes;
42985-extern atomic_t cm_connecteds;
42986-extern atomic_t cm_connect_reqs;
42987-extern atomic_t cm_rejects;
42988-extern atomic_t mod_qp_timouts;
42989-extern atomic_t qps_created;
42990-extern atomic_t qps_destroyed;
42991-extern atomic_t sw_qps_destroyed;
42992+extern atomic_unchecked_t cm_connects;
42993+extern atomic_unchecked_t cm_accepts;
42994+extern atomic_unchecked_t cm_disconnects;
42995+extern atomic_unchecked_t cm_closes;
42996+extern atomic_unchecked_t cm_connecteds;
42997+extern atomic_unchecked_t cm_connect_reqs;
42998+extern atomic_unchecked_t cm_rejects;
42999+extern atomic_unchecked_t mod_qp_timouts;
43000+extern atomic_unchecked_t qps_created;
43001+extern atomic_unchecked_t qps_destroyed;
43002+extern atomic_unchecked_t sw_qps_destroyed;
43003 extern u32 mh_detected;
43004 extern u32 mh_pauses_sent;
43005 extern u32 cm_packets_sent;
43006@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
43007 extern u32 cm_packets_received;
43008 extern u32 cm_packets_dropped;
43009 extern u32 cm_packets_retrans;
43010-extern atomic_t cm_listens_created;
43011-extern atomic_t cm_listens_destroyed;
43012+extern atomic_unchecked_t cm_listens_created;
43013+extern atomic_unchecked_t cm_listens_destroyed;
43014 extern u32 cm_backlog_drops;
43015-extern atomic_t cm_loopbacks;
43016-extern atomic_t cm_nodes_created;
43017-extern atomic_t cm_nodes_destroyed;
43018-extern atomic_t cm_accel_dropped_pkts;
43019-extern atomic_t cm_resets_recvd;
43020-extern atomic_t pau_qps_created;
43021-extern atomic_t pau_qps_destroyed;
43022+extern atomic_unchecked_t cm_loopbacks;
43023+extern atomic_unchecked_t cm_nodes_created;
43024+extern atomic_unchecked_t cm_nodes_destroyed;
43025+extern atomic_unchecked_t cm_accel_dropped_pkts;
43026+extern atomic_unchecked_t cm_resets_recvd;
43027+extern atomic_unchecked_t pau_qps_created;
43028+extern atomic_unchecked_t pau_qps_destroyed;
43029
43030 extern u32 int_mod_timer_init;
43031 extern u32 int_mod_cq_depth_256;
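The nes counters converted above are pure statistics: they are only incremented and read back for ethtool reporting, so wrap-around is harmless. Switching them to atomic_unchecked_t opts them out of the PaX/grsecurity REFCOUNT overflow trap (which is meant for reference counts) while keeping atomic update semantics. A minimal sketch of the pattern, assuming the grsecurity atomic_unchecked_t definitions are in scope; pkts_seen and its helpers are illustrative names, not from the patch:

/* A statistics counter that may legitimately wrap.  The _unchecked
 * variants behave like their atomic_t counterparts but are excluded
 * from REFCOUNT overflow detection. */
static atomic_unchecked_t pkts_seen = ATOMIC_INIT(0);

static void count_packet(void)
{
	atomic_inc_unchecked(&pkts_seen);
}

static int pkts_seen_snapshot(void)
{
	return atomic_read_unchecked(&pkts_seen);
}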
43032diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43033index 9c9f2f5..2559190 100644
43034--- a/drivers/infiniband/hw/nes/nes_cm.c
43035+++ b/drivers/infiniband/hw/nes/nes_cm.c
43036@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
43037 u32 cm_packets_retrans;
43038 u32 cm_packets_created;
43039 u32 cm_packets_received;
43040-atomic_t cm_listens_created;
43041-atomic_t cm_listens_destroyed;
43042+atomic_unchecked_t cm_listens_created;
43043+atomic_unchecked_t cm_listens_destroyed;
43044 u32 cm_backlog_drops;
43045-atomic_t cm_loopbacks;
43046-atomic_t cm_nodes_created;
43047-atomic_t cm_nodes_destroyed;
43048-atomic_t cm_accel_dropped_pkts;
43049-atomic_t cm_resets_recvd;
43050+atomic_unchecked_t cm_loopbacks;
43051+atomic_unchecked_t cm_nodes_created;
43052+atomic_unchecked_t cm_nodes_destroyed;
43053+atomic_unchecked_t cm_accel_dropped_pkts;
43054+atomic_unchecked_t cm_resets_recvd;
43055
43056 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43057 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43058@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
43059 /* instance of function pointers for client API */
43060 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43061 static struct nes_cm_ops nes_cm_api = {
43062- mini_cm_accelerated,
43063- mini_cm_listen,
43064- mini_cm_del_listen,
43065- mini_cm_connect,
43066- mini_cm_close,
43067- mini_cm_accept,
43068- mini_cm_reject,
43069- mini_cm_recv_pkt,
43070- mini_cm_dealloc_core,
43071- mini_cm_get,
43072- mini_cm_set
43073+ .accelerated = mini_cm_accelerated,
43074+ .listen = mini_cm_listen,
43075+ .stop_listener = mini_cm_del_listen,
43076+ .connect = mini_cm_connect,
43077+ .close = mini_cm_close,
43078+ .accept = mini_cm_accept,
43079+ .reject = mini_cm_reject,
43080+ .recv_pkt = mini_cm_recv_pkt,
43081+ .destroy_cm_core = mini_cm_dealloc_core,
43082+ .get = mini_cm_get,
43083+ .set = mini_cm_set
43084 };
43085
43086 static struct nes_cm_core *g_cm_core;
43087
43088-atomic_t cm_connects;
43089-atomic_t cm_accepts;
43090-atomic_t cm_disconnects;
43091-atomic_t cm_closes;
43092-atomic_t cm_connecteds;
43093-atomic_t cm_connect_reqs;
43094-atomic_t cm_rejects;
43095+atomic_unchecked_t cm_connects;
43096+atomic_unchecked_t cm_accepts;
43097+atomic_unchecked_t cm_disconnects;
43098+atomic_unchecked_t cm_closes;
43099+atomic_unchecked_t cm_connecteds;
43100+atomic_unchecked_t cm_connect_reqs;
43101+atomic_unchecked_t cm_rejects;
43102
43103 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43104 {
43105@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43106 kfree(listener);
43107 listener = NULL;
43108 ret = 0;
43109- atomic_inc(&cm_listens_destroyed);
43110+ atomic_inc_unchecked(&cm_listens_destroyed);
43111 } else {
43112 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43113 }
43114@@ -1465,7 +1465,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43115 cm_node->rem_mac);
43116
43117 add_hte_node(cm_core, cm_node);
43118- atomic_inc(&cm_nodes_created);
43119+ atomic_inc_unchecked(&cm_nodes_created);
43120
43121 return cm_node;
43122 }
43123@@ -1523,7 +1523,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43124 }
43125
43126 atomic_dec(&cm_core->node_cnt);
43127- atomic_inc(&cm_nodes_destroyed);
43128+ atomic_inc_unchecked(&cm_nodes_destroyed);
43129 nesqp = cm_node->nesqp;
43130 if (nesqp) {
43131 nesqp->cm_node = NULL;
43132@@ -1587,7 +1587,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43133
43134 static void drop_packet(struct sk_buff *skb)
43135 {
43136- atomic_inc(&cm_accel_dropped_pkts);
43137+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43138 dev_kfree_skb_any(skb);
43139 }
43140
43141@@ -1650,7 +1650,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43142 {
43143
43144 int reset = 0; /* whether to send reset in case of err.. */
43145- atomic_inc(&cm_resets_recvd);
43146+ atomic_inc_unchecked(&cm_resets_recvd);
43147 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43148 " refcnt=%d\n", cm_node, cm_node->state,
43149 atomic_read(&cm_node->ref_count));
43150@@ -2291,7 +2291,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43151 rem_ref_cm_node(cm_node->cm_core, cm_node);
43152 return NULL;
43153 }
43154- atomic_inc(&cm_loopbacks);
43155+ atomic_inc_unchecked(&cm_loopbacks);
43156 loopbackremotenode->loopbackpartner = cm_node;
43157 loopbackremotenode->tcp_cntxt.rcv_wscale =
43158 NES_CM_DEFAULT_RCV_WND_SCALE;
43159@@ -2566,7 +2566,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43160 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43161 else {
43162 rem_ref_cm_node(cm_core, cm_node);
43163- atomic_inc(&cm_accel_dropped_pkts);
43164+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43165 dev_kfree_skb_any(skb);
43166 }
43167 break;
43168@@ -2874,7 +2874,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43169
43170 if ((cm_id) && (cm_id->event_handler)) {
43171 if (issue_disconn) {
43172- atomic_inc(&cm_disconnects);
43173+ atomic_inc_unchecked(&cm_disconnects);
43174 cm_event.event = IW_CM_EVENT_DISCONNECT;
43175 cm_event.status = disconn_status;
43176 cm_event.local_addr = cm_id->local_addr;
43177@@ -2896,7 +2896,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43178 }
43179
43180 if (issue_close) {
43181- atomic_inc(&cm_closes);
43182+ atomic_inc_unchecked(&cm_closes);
43183 nes_disconnect(nesqp, 1);
43184
43185 cm_id->provider_data = nesqp;
43186@@ -3034,7 +3034,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43187
43188 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43189 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43190- atomic_inc(&cm_accepts);
43191+ atomic_inc_unchecked(&cm_accepts);
43192
43193 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43194 netdev_refcnt_read(nesvnic->netdev));
43195@@ -3223,7 +3223,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43196 struct nes_cm_core *cm_core;
43197 u8 *start_buff;
43198
43199- atomic_inc(&cm_rejects);
43200+ atomic_inc_unchecked(&cm_rejects);
43201 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43202 loopback = cm_node->loopbackpartner;
43203 cm_core = cm_node->cm_core;
43204@@ -3285,7 +3285,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43205 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43206 ntohs(laddr->sin_port));
43207
43208- atomic_inc(&cm_connects);
43209+ atomic_inc_unchecked(&cm_connects);
43210 nesqp->active_conn = 1;
43211
43212 /* cache the cm_id in the qp */
43213@@ -3397,7 +3397,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43214 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43215 return err;
43216 }
43217- atomic_inc(&cm_listens_created);
43218+ atomic_inc_unchecked(&cm_listens_created);
43219 }
43220
43221 cm_id->add_ref(cm_id);
43222@@ -3504,7 +3504,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43223
43224 if (nesqp->destroyed)
43225 return;
43226- atomic_inc(&cm_connecteds);
43227+ atomic_inc_unchecked(&cm_connecteds);
43228 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43229 " local port 0x%04X. jiffies = %lu.\n",
43230 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43231@@ -3685,7 +3685,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43232
43233 cm_id->add_ref(cm_id);
43234 ret = cm_id->event_handler(cm_id, &cm_event);
43235- atomic_inc(&cm_closes);
43236+ atomic_inc_unchecked(&cm_closes);
43237 cm_event.event = IW_CM_EVENT_CLOSE;
43238 cm_event.status = 0;
43239 cm_event.provider_data = cm_id->provider_data;
43240@@ -3725,7 +3725,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43241 return;
43242 cm_id = cm_node->cm_id;
43243
43244- atomic_inc(&cm_connect_reqs);
43245+ atomic_inc_unchecked(&cm_connect_reqs);
43246 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43247 cm_node, cm_id, jiffies);
43248
43249@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43250 return;
43251 cm_id = cm_node->cm_id;
43252
43253- atomic_inc(&cm_connect_reqs);
43254+ atomic_inc_unchecked(&cm_connect_reqs);
43255 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43256 cm_node, cm_id, jiffies);
43257
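The nes_cm_ops hunk above also rewrites a positional initializer as C99 designated initializers. Besides surviving member reordering, this explicit slot binding is what lets grsecurity's constify plugin treat ops structures as read-only data. A hedged sketch with an invented ops type (struct my_ops and its members are illustrative only):

struct my_ops {
	int  (*open)(void *ctx);
	void (*close)(void *ctx);
};

static int  demo_open(void *ctx)  { return 0; }
static void demo_close(void *ctx) { }

/* Designated form: each function is tied to a named slot rather
 * than to its position in the struct declaration. */
static const struct my_ops demo_ops = {
	.open  = demo_open,
	.close = demo_close,
};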
43258diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43259index 4166452..fc952c3 100644
43260--- a/drivers/infiniband/hw/nes/nes_mgt.c
43261+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43262@@ -40,8 +40,8 @@
43263 #include "nes.h"
43264 #include "nes_mgt.h"
43265
43266-atomic_t pau_qps_created;
43267-atomic_t pau_qps_destroyed;
43268+atomic_unchecked_t pau_qps_created;
43269+atomic_unchecked_t pau_qps_destroyed;
43270
43271 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43272 {
43273@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43274 {
43275 struct sk_buff *skb;
43276 unsigned long flags;
43277- atomic_inc(&pau_qps_destroyed);
43278+ atomic_inc_unchecked(&pau_qps_destroyed);
43279
43280 /* Free packets that have not yet been forwarded */
43281 /* Lock is acquired by skb_dequeue when removing the skb */
43282@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43283 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43284 skb_queue_head_init(&nesqp->pau_list);
43285 spin_lock_init(&nesqp->pau_lock);
43286- atomic_inc(&pau_qps_created);
43287+ atomic_inc_unchecked(&pau_qps_created);
43288 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43289 }
43290
43291diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43292index 49eb511..a774366 100644
43293--- a/drivers/infiniband/hw/nes/nes_nic.c
43294+++ b/drivers/infiniband/hw/nes/nes_nic.c
43295@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43296 target_stat_values[++index] = mh_detected;
43297 target_stat_values[++index] = mh_pauses_sent;
43298 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43299- target_stat_values[++index] = atomic_read(&cm_connects);
43300- target_stat_values[++index] = atomic_read(&cm_accepts);
43301- target_stat_values[++index] = atomic_read(&cm_disconnects);
43302- target_stat_values[++index] = atomic_read(&cm_connecteds);
43303- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43304- target_stat_values[++index] = atomic_read(&cm_rejects);
43305- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43306- target_stat_values[++index] = atomic_read(&qps_created);
43307- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43308- target_stat_values[++index] = atomic_read(&qps_destroyed);
43309- target_stat_values[++index] = atomic_read(&cm_closes);
43310+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43311+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43312+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43313+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43314+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43315+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43316+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43317+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43318+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43319+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43320+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43321 target_stat_values[++index] = cm_packets_sent;
43322 target_stat_values[++index] = cm_packets_bounced;
43323 target_stat_values[++index] = cm_packets_created;
43324 target_stat_values[++index] = cm_packets_received;
43325 target_stat_values[++index] = cm_packets_dropped;
43326 target_stat_values[++index] = cm_packets_retrans;
43327- target_stat_values[++index] = atomic_read(&cm_listens_created);
43328- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43329+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43330+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43331 target_stat_values[++index] = cm_backlog_drops;
43332- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43333- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43334- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43335- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43336- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43337+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43338+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43339+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43340+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43341+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43342 target_stat_values[++index] = nesadapter->free_4kpbl;
43343 target_stat_values[++index] = nesadapter->free_256pbl;
43344 target_stat_values[++index] = int_mod_timer_init;
43345 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43346 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43347 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43348- target_stat_values[++index] = atomic_read(&pau_qps_created);
43349- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43350+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43351+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43352 }
43353
43354 /**
43355diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43356index eb62461..2b7fc71 100644
43357--- a/drivers/infiniband/hw/nes/nes_verbs.c
43358+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43359@@ -46,9 +46,9 @@
43360
43361 #include <rdma/ib_umem.h>
43362
43363-atomic_t mod_qp_timouts;
43364-atomic_t qps_created;
43365-atomic_t sw_qps_destroyed;
43366+atomic_unchecked_t mod_qp_timouts;
43367+atomic_unchecked_t qps_created;
43368+atomic_unchecked_t sw_qps_destroyed;
43369
43370 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43371
43372@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43373 if (init_attr->create_flags)
43374 return ERR_PTR(-EINVAL);
43375
43376- atomic_inc(&qps_created);
43377+ atomic_inc_unchecked(&qps_created);
43378 switch (init_attr->qp_type) {
43379 case IB_QPT_RC:
43380 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43381@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43382 struct iw_cm_event cm_event;
43383 int ret = 0;
43384
43385- atomic_inc(&sw_qps_destroyed);
43386+ atomic_inc_unchecked(&sw_qps_destroyed);
43387 nesqp->destroyed = 1;
43388
43389 /* Blow away the connection if it exists. */
43390diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43391index 1946101..09766d2 100644
43392--- a/drivers/infiniband/hw/qib/qib.h
43393+++ b/drivers/infiniband/hw/qib/qib.h
43394@@ -52,6 +52,7 @@
43395 #include <linux/kref.h>
43396 #include <linux/sched.h>
43397 #include <linux/kthread.h>
43398+#include <linux/slab.h>
43399
43400 #include "qib_common.h"
43401 #include "qib_verbs.h"
43402diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43403index 24c41ba..102d71f 100644
43404--- a/drivers/input/gameport/gameport.c
43405+++ b/drivers/input/gameport/gameport.c
43406@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43407 */
43408 static void gameport_init_port(struct gameport *gameport)
43409 {
43410- static atomic_t gameport_no = ATOMIC_INIT(0);
43411+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
43412
43413 __module_get(THIS_MODULE);
43414
43415 mutex_init(&gameport->drv_mutex);
43416 device_initialize(&gameport->dev);
43417 dev_set_name(&gameport->dev, "gameport%lu",
43418- (unsigned long)atomic_inc_return(&gameport_no) - 1);
43419+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
43420 gameport->dev.bus = &gameport_bus;
43421 gameport->dev.release = gameport_release_port;
43422 if (gameport->parent)
43423diff --git a/drivers/input/input.c b/drivers/input/input.c
43424index 1c4c0db..6f7abe3 100644
43425--- a/drivers/input/input.c
43426+++ b/drivers/input/input.c
43427@@ -1772,7 +1772,7 @@ EXPORT_SYMBOL_GPL(input_class);
43428 */
43429 struct input_dev *input_allocate_device(void)
43430 {
43431- static atomic_t input_no = ATOMIC_INIT(0);
43432+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
43433 struct input_dev *dev;
43434
43435 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43436@@ -1787,7 +1787,7 @@ struct input_dev *input_allocate_device(void)
43437 INIT_LIST_HEAD(&dev->node);
43438
43439 dev_set_name(&dev->dev, "input%ld",
43440- (unsigned long) atomic_inc_return(&input_no) - 1);
43441+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
43442
43443 __module_get(THIS_MODULE);
43444 }
43445diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43446index 4a95b22..874c182 100644
43447--- a/drivers/input/joystick/sidewinder.c
43448+++ b/drivers/input/joystick/sidewinder.c
43449@@ -30,6 +30,7 @@
43450 #include <linux/kernel.h>
43451 #include <linux/module.h>
43452 #include <linux/slab.h>
43453+#include <linux/sched.h>
43454 #include <linux/input.h>
43455 #include <linux/gameport.h>
43456 #include <linux/jiffies.h>
43457diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43458index 603fe0d..f63decc 100644
43459--- a/drivers/input/joystick/xpad.c
43460+++ b/drivers/input/joystick/xpad.c
43461@@ -737,7 +737,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43462
43463 static int xpad_led_probe(struct usb_xpad *xpad)
43464 {
43465- static atomic_t led_seq = ATOMIC_INIT(0);
43466+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
43467 long led_no;
43468 struct xpad_led *led;
43469 struct led_classdev *led_cdev;
43470@@ -750,7 +750,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43471 if (!led)
43472 return -ENOMEM;
43473
43474- led_no = (long)atomic_inc_return(&led_seq) - 1;
43475+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
43476
43477 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
43478 led->xpad = xpad;
43479diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43480index e204f26..8459f15 100644
43481--- a/drivers/input/misc/ims-pcu.c
43482+++ b/drivers/input/misc/ims-pcu.c
43483@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43484
43485 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43486 {
43487- static atomic_t device_no = ATOMIC_INIT(0);
43488+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
43489
43490 const struct ims_pcu_device_info *info;
43491 u8 device_id;
43492@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43493 }
43494
43495 /* Device appears to be operable, complete initialization */
43496- pcu->device_no = atomic_inc_return(&device_no) - 1;
43497+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
43498
43499 error = ims_pcu_setup_backlight(pcu);
43500 if (error)
43501diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43502index 2f0b39d..7370f13 100644
43503--- a/drivers/input/mouse/psmouse.h
43504+++ b/drivers/input/mouse/psmouse.h
43505@@ -116,7 +116,7 @@ struct psmouse_attribute {
43506 ssize_t (*set)(struct psmouse *psmouse, void *data,
43507 const char *buf, size_t count);
43508 bool protect;
43509-};
43510+} __do_const;
43511 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43512
43513 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43514diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43515index b604564..3f14ae4 100644
43516--- a/drivers/input/mousedev.c
43517+++ b/drivers/input/mousedev.c
43518@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43519
43520 spin_unlock_irq(&client->packet_lock);
43521
43522- if (copy_to_user(buffer, data, count))
43523+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43524 return -EFAULT;
43525
43526 return count;
43527diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43528index b29134d..394deb0 100644
43529--- a/drivers/input/serio/serio.c
43530+++ b/drivers/input/serio/serio.c
43531@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43532 */
43533 static void serio_init_port(struct serio *serio)
43534 {
43535- static atomic_t serio_no = ATOMIC_INIT(0);
43536+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
43537
43538 __module_get(THIS_MODULE);
43539
43540@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43541 mutex_init(&serio->drv_mutex);
43542 device_initialize(&serio->dev);
43543 dev_set_name(&serio->dev, "serio%ld",
43544- (long)atomic_inc_return(&serio_no) - 1);
43545+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
43546 serio->dev.bus = &serio_bus;
43547 serio->dev.release = serio_release_port;
43548 serio->dev.groups = serio_device_attr_groups;
43549diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43550index c9a02fe..0debc75 100644
43551--- a/drivers/input/serio/serio_raw.c
43552+++ b/drivers/input/serio/serio_raw.c
43553@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43554
43555 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43556 {
43557- static atomic_t serio_raw_no = ATOMIC_INIT(0);
43558+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
43559 struct serio_raw *serio_raw;
43560 int err;
43561
43562@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43563 }
43564
43565 snprintf(serio_raw->name, sizeof(serio_raw->name),
43566- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
43567+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
43568 kref_init(&serio_raw->kref);
43569 INIT_LIST_HEAD(&serio_raw->client_list);
43570 init_waitqueue_head(&serio_raw->wait);
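The gameport, input, xpad, ims-pcu, serio and serio_raw hunks are all the same change: a static counter that only mints device names is moved to the _unchecked atomics, because its monotonic growth is not a reference count and should not trip REFCOUNT saturation. Roughly, under the same assumption about the grsecurity atomics (next_dev_no is an illustrative helper):

static atomic_unchecked_t dev_no = ATOMIC_INIT(0);

/* Hand out the next device index for dev_set_name()-style naming;
 * the value is never used to manage object lifetime. */
static unsigned long next_dev_no(void)
{
	return (unsigned long)atomic_inc_return_unchecked(&dev_no) - 1;
}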
43571diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43572index e5555fc..937986d 100644
43573--- a/drivers/iommu/iommu.c
43574+++ b/drivers/iommu/iommu.c
43575@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
43576 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
43577 {
43578 bus_register_notifier(bus, &iommu_bus_nb);
43579- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
43580+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
43581 }
43582
43583 /**
43584diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43585index 228632c9..edfe331 100644
43586--- a/drivers/iommu/irq_remapping.c
43587+++ b/drivers/iommu/irq_remapping.c
43588@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43589 void panic_if_irq_remap(const char *msg)
43590 {
43591 if (irq_remapping_enabled)
43592- panic(msg);
43593+ panic("%s", msg);
43594 }
43595
43596 static void ir_ack_apic_edge(struct irq_data *data)
43597@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43598
43599 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43600 {
43601- chip->irq_print_chip = ir_print_prefix;
43602- chip->irq_ack = ir_ack_apic_edge;
43603- chip->irq_eoi = ir_ack_apic_level;
43604- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43605+ pax_open_kernel();
43606+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43607+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43608+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43609+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43610+ pax_close_kernel();
43611 }
43612
43613 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
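irq_remap_modify_chip_defaults() now writes through pax_open_kernel()/pax_close_kernel() because constified structures such as struct irq_chip live in read-only memory under PaX; the helpers briefly lift kernel write protection around the update, and the casts through void ** strip the const qualifier the constify plugin added. The usage pattern, assuming the PaX helpers provided elsewhere in this patch (my_ack_handler is an illustrative name):

/* chip points into read-only (constified) memory; a direct store
 * would fault.  Writes must be bracketed by the PaX helpers. */
pax_open_kernel();
*(void **)&chip->irq_ack = my_ack_handler;
pax_close_kernel();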
43614diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43615index ac2d41b..c657aa4 100644
43616--- a/drivers/irqchip/irq-gic.c
43617+++ b/drivers/irqchip/irq-gic.c
43618@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43619 * Supported arch specific GIC irq extension.
43620 * Default make them NULL.
43621 */
43622-struct irq_chip gic_arch_extn = {
43623+irq_chip_no_const gic_arch_extn = {
43624 .irq_eoi = NULL,
43625 .irq_mask = NULL,
43626 .irq_unmask = NULL,
43627@@ -336,7 +336,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43628 chained_irq_exit(chip, desc);
43629 }
43630
43631-static struct irq_chip gic_chip = {
43632+static irq_chip_no_const gic_chip __read_only = {
43633 .name = "GIC",
43634 .irq_mask = gic_mask_irq,
43635 .irq_unmask = gic_unmask_irq,
43636diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43637index 8777065..a4a9967 100644
43638--- a/drivers/irqchip/irq-renesas-irqc.c
43639+++ b/drivers/irqchip/irq-renesas-irqc.c
43640@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43641 struct irqc_priv *p;
43642 struct resource *io;
43643 struct resource *irq;
43644- struct irq_chip *irq_chip;
43645+ irq_chip_no_const *irq_chip;
43646 const char *name = dev_name(&pdev->dev);
43647 int ret;
43648 int k;
43649diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43650index ac6f72b..81150f2 100644
43651--- a/drivers/isdn/capi/capi.c
43652+++ b/drivers/isdn/capi/capi.c
43653@@ -81,8 +81,8 @@ struct capiminor {
43654
43655 struct capi20_appl *ap;
43656 u32 ncci;
43657- atomic_t datahandle;
43658- atomic_t msgid;
43659+ atomic_unchecked_t datahandle;
43660+ atomic_unchecked_t msgid;
43661
43662 struct tty_port port;
43663 int ttyinstop;
43664@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43665 capimsg_setu16(s, 2, mp->ap->applid);
43666 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43667 capimsg_setu8 (s, 5, CAPI_RESP);
43668- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43669+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43670 capimsg_setu32(s, 8, mp->ncci);
43671 capimsg_setu16(s, 12, datahandle);
43672 }
43673@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43674 mp->outbytes -= len;
43675 spin_unlock_bh(&mp->outlock);
43676
43677- datahandle = atomic_inc_return(&mp->datahandle);
43678+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43679 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43680 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43681 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43682 capimsg_setu16(skb->data, 2, mp->ap->applid);
43683 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43684 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43685- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43686+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43687 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43688 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43689 capimsg_setu16(skb->data, 16, len); /* Data length */
43690diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43691index c44950d..10ac276 100644
43692--- a/drivers/isdn/gigaset/bas-gigaset.c
43693+++ b/drivers/isdn/gigaset/bas-gigaset.c
43694@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43695
43696
43697 static const struct gigaset_ops gigops = {
43698- gigaset_write_cmd,
43699- gigaset_write_room,
43700- gigaset_chars_in_buffer,
43701- gigaset_brkchars,
43702- gigaset_init_bchannel,
43703- gigaset_close_bchannel,
43704- gigaset_initbcshw,
43705- gigaset_freebcshw,
43706- gigaset_reinitbcshw,
43707- gigaset_initcshw,
43708- gigaset_freecshw,
43709- gigaset_set_modem_ctrl,
43710- gigaset_baud_rate,
43711- gigaset_set_line_ctrl,
43712- gigaset_isoc_send_skb,
43713- gigaset_isoc_input,
43714+ .write_cmd = gigaset_write_cmd,
43715+ .write_room = gigaset_write_room,
43716+ .chars_in_buffer = gigaset_chars_in_buffer,
43717+ .brkchars = gigaset_brkchars,
43718+ .init_bchannel = gigaset_init_bchannel,
43719+ .close_bchannel = gigaset_close_bchannel,
43720+ .initbcshw = gigaset_initbcshw,
43721+ .freebcshw = gigaset_freebcshw,
43722+ .reinitbcshw = gigaset_reinitbcshw,
43723+ .initcshw = gigaset_initcshw,
43724+ .freecshw = gigaset_freecshw,
43725+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43726+ .baud_rate = gigaset_baud_rate,
43727+ .set_line_ctrl = gigaset_set_line_ctrl,
43728+ .send_skb = gigaset_isoc_send_skb,
43729+ .handle_input = gigaset_isoc_input,
43730 };
43731
43732 /* bas_gigaset_init
43733diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43734index 600c79b..3752bab 100644
43735--- a/drivers/isdn/gigaset/interface.c
43736+++ b/drivers/isdn/gigaset/interface.c
43737@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43738 }
43739 tty->driver_data = cs;
43740
43741- ++cs->port.count;
43742+ atomic_inc(&cs->port.count);
43743
43744- if (cs->port.count == 1) {
43745+ if (atomic_read(&cs->port.count) == 1) {
43746 tty_port_tty_set(&cs->port, tty);
43747 cs->port.low_latency = 1;
43748 }
43749@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43750
43751 if (!cs->connected)
43752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43753- else if (!cs->port.count)
43754+ else if (!atomic_read(&cs->port.count))
43755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43756- else if (!--cs->port.count)
43757+ else if (!atomic_dec_return(&cs->port.count))
43758 tty_port_tty_set(&cs->port, NULL);
43759
43760 mutex_unlock(&cs->mutex);
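These if_open()/if_close() changes are mechanical fallout of the patch converting struct tty_port's count field from int to atomic_t elsewhere, so that racy open/close accounting cannot corrupt the refcount; every driver touching the field must switch to atomic accessors. The shape of the conversion, with first_open_setup()/last_close_teardown() as illustrative stand-ins:

atomic_inc(&port->count);
if (atomic_read(&port->count) == 1)
	first_open_setup();		/* first opener */
/* ... */
if (!atomic_dec_return(&port->count))
	last_close_teardown();		/* last closer */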
43761diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43762index 8c91fd5..14f13ce 100644
43763--- a/drivers/isdn/gigaset/ser-gigaset.c
43764+++ b/drivers/isdn/gigaset/ser-gigaset.c
43765@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43766 }
43767
43768 static const struct gigaset_ops ops = {
43769- gigaset_write_cmd,
43770- gigaset_write_room,
43771- gigaset_chars_in_buffer,
43772- gigaset_brkchars,
43773- gigaset_init_bchannel,
43774- gigaset_close_bchannel,
43775- gigaset_initbcshw,
43776- gigaset_freebcshw,
43777- gigaset_reinitbcshw,
43778- gigaset_initcshw,
43779- gigaset_freecshw,
43780- gigaset_set_modem_ctrl,
43781- gigaset_baud_rate,
43782- gigaset_set_line_ctrl,
43783- gigaset_m10x_send_skb, /* asyncdata.c */
43784- gigaset_m10x_input, /* asyncdata.c */
43785+ .write_cmd = gigaset_write_cmd,
43786+ .write_room = gigaset_write_room,
43787+ .chars_in_buffer = gigaset_chars_in_buffer,
43788+ .brkchars = gigaset_brkchars,
43789+ .init_bchannel = gigaset_init_bchannel,
43790+ .close_bchannel = gigaset_close_bchannel,
43791+ .initbcshw = gigaset_initbcshw,
43792+ .freebcshw = gigaset_freebcshw,
43793+ .reinitbcshw = gigaset_reinitbcshw,
43794+ .initcshw = gigaset_initcshw,
43795+ .freecshw = gigaset_freecshw,
43796+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43797+ .baud_rate = gigaset_baud_rate,
43798+ .set_line_ctrl = gigaset_set_line_ctrl,
43799+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43800+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43801 };
43802
43803
43804diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43805index d0a41cb..b953e50 100644
43806--- a/drivers/isdn/gigaset/usb-gigaset.c
43807+++ b/drivers/isdn/gigaset/usb-gigaset.c
43808@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43809 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43810 memcpy(cs->hw.usb->bchars, buf, 6);
43811 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43812- 0, 0, &buf, 6, 2000);
43813+ 0, 0, buf, 6, 2000);
43814 }
43815
43816 static void gigaset_freebcshw(struct bc_state *bcs)
43817@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43818 }
43819
43820 static const struct gigaset_ops ops = {
43821- gigaset_write_cmd,
43822- gigaset_write_room,
43823- gigaset_chars_in_buffer,
43824- gigaset_brkchars,
43825- gigaset_init_bchannel,
43826- gigaset_close_bchannel,
43827- gigaset_initbcshw,
43828- gigaset_freebcshw,
43829- gigaset_reinitbcshw,
43830- gigaset_initcshw,
43831- gigaset_freecshw,
43832- gigaset_set_modem_ctrl,
43833- gigaset_baud_rate,
43834- gigaset_set_line_ctrl,
43835- gigaset_m10x_send_skb,
43836- gigaset_m10x_input,
43837+ .write_cmd = gigaset_write_cmd,
43838+ .write_room = gigaset_write_room,
43839+ .chars_in_buffer = gigaset_chars_in_buffer,
43840+ .brkchars = gigaset_brkchars,
43841+ .init_bchannel = gigaset_init_bchannel,
43842+ .close_bchannel = gigaset_close_bchannel,
43843+ .initbcshw = gigaset_initbcshw,
43844+ .freebcshw = gigaset_freebcshw,
43845+ .reinitbcshw = gigaset_reinitbcshw,
43846+ .initcshw = gigaset_initcshw,
43847+ .freecshw = gigaset_freecshw,
43848+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43849+ .baud_rate = gigaset_baud_rate,
43850+ .set_line_ctrl = gigaset_set_line_ctrl,
43851+ .send_skb = gigaset_m10x_send_skb,
43852+ .handle_input = gigaset_m10x_input,
43853 };
43854
43855 /*
43856diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43857index 4d9b195..455075c 100644
43858--- a/drivers/isdn/hardware/avm/b1.c
43859+++ b/drivers/isdn/hardware/avm/b1.c
43860@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43861 }
43862 if (left) {
43863 if (t4file->user) {
43864- if (copy_from_user(buf, dp, left))
43865+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43866 return -EFAULT;
43867 } else {
43868 memcpy(buf, dp, left);
43869@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43870 }
43871 if (left) {
43872 if (config->user) {
43873- if (copy_from_user(buf, dp, left))
43874+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43875 return -EFAULT;
43876 } else {
43877 memcpy(buf, dp, left);
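The b1.c hunks bound copy_from_user() by the destination size before copying, so a corrupt or hostile length field cannot overflow the on-stack buffer; copy_from_user() itself only validates the source range. The mousedev and icn hunks in this patch apply the same guard. Schematically:

char buf[256];	/* illustrative size */

if (left > sizeof(buf) || copy_from_user(buf, dp, left))
	return -EFAULT;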
43878diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43879index 9bb12ba..d4262f7 100644
43880--- a/drivers/isdn/i4l/isdn_common.c
43881+++ b/drivers/isdn/i4l/isdn_common.c
43882@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43883 } else
43884 return -EINVAL;
43885 case IIOCDBGVAR:
43886+ if (!capable(CAP_SYS_RAWIO))
43887+ return -EPERM;
43888 if (arg) {
43889 if (copy_to_user(argp, &dev, sizeof(ulong)))
43890 return -EFAULT;
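IIOCDBGVAR hands the kernel address of the global dev structure to user space, which leaks layout information useful for exploitation, so the patch gates it behind CAP_SYS_RAWIO. The general ioctl-guard shape (a fragment, not a complete handler):

case IIOCDBGVAR:
	if (!capable(CAP_SYS_RAWIO))	/* kernel pointers are privileged */
		return -EPERM;
	/* ... existing copy_to_user() path ... */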
43891diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43892index 91d5730..336523e 100644
43893--- a/drivers/isdn/i4l/isdn_concap.c
43894+++ b/drivers/isdn/i4l/isdn_concap.c
43895@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43896 }
43897
43898 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43899- &isdn_concap_dl_data_req,
43900- &isdn_concap_dl_connect_req,
43901- &isdn_concap_dl_disconn_req
43902+ .data_req = &isdn_concap_dl_data_req,
43903+ .connect_req = &isdn_concap_dl_connect_req,
43904+ .disconn_req = &isdn_concap_dl_disconn_req
43905 };
43906
43907 /* The following should better go into a dedicated source file such that
43908diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43909index 3c5f249..5fac4d0 100644
43910--- a/drivers/isdn/i4l/isdn_tty.c
43911+++ b/drivers/isdn/i4l/isdn_tty.c
43912@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43913
43914 #ifdef ISDN_DEBUG_MODEM_OPEN
43915 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43916- port->count);
43917+ atomic_read(&port->count));
43918 #endif
43919- port->count++;
43920+ atomic_inc(&port->count);
43921 port->tty = tty;
43922 /*
43923 * Start up serial port
43924@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43925 #endif
43926 return;
43927 }
43928- if ((tty->count == 1) && (port->count != 1)) {
43929+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43930 /*
43931 * Uh, oh. tty->count is 1, which means that the tty
43932 * structure will be freed. Info->count should always
43933@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43934 * serial port won't be shutdown.
43935 */
43936 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43937- "info->count is %d\n", port->count);
43938- port->count = 1;
43939+ "info->count is %d\n", atomic_read(&port->count));
43940+ atomic_set(&port->count, 1);
43941 }
43942- if (--port->count < 0) {
43943+ if (atomic_dec_return(&port->count) < 0) {
43944 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43945- info->line, port->count);
43946- port->count = 0;
43947+ info->line, atomic_read(&port->count));
43948+ atomic_set(&port->count, 0);
43949 }
43950- if (port->count) {
43951+ if (atomic_read(&port->count)) {
43952 #ifdef ISDN_DEBUG_MODEM_OPEN
43953 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43954 #endif
43955@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43956 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43957 return;
43958 isdn_tty_shutdown(info);
43959- port->count = 0;
43960+ atomic_set(&port->count, 0);
43961 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43962 port->tty = NULL;
43963 wake_up_interruptible(&port->open_wait);
43964@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43965 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43966 modem_info *info = &dev->mdm.info[i];
43967
43968- if (info->port.count == 0)
43969+ if (atomic_read(&info->port.count) == 0)
43970 continue;
43971 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43972 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43973diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43974index e2d4e58..40cd045 100644
43975--- a/drivers/isdn/i4l/isdn_x25iface.c
43976+++ b/drivers/isdn/i4l/isdn_x25iface.c
43977@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43978
43979
43980 static struct concap_proto_ops ix25_pops = {
43981- &isdn_x25iface_proto_new,
43982- &isdn_x25iface_proto_del,
43983- &isdn_x25iface_proto_restart,
43984- &isdn_x25iface_proto_close,
43985- &isdn_x25iface_xmit,
43986- &isdn_x25iface_receive,
43987- &isdn_x25iface_connect_ind,
43988- &isdn_x25iface_disconn_ind
43989+ .proto_new = &isdn_x25iface_proto_new,
43990+ .proto_del = &isdn_x25iface_proto_del,
43991+ .restart = &isdn_x25iface_proto_restart,
43992+ .close = &isdn_x25iface_proto_close,
43993+ .encap_and_xmit = &isdn_x25iface_xmit,
43994+ .data_ind = &isdn_x25iface_receive,
43995+ .connect_ind = &isdn_x25iface_connect_ind,
43996+ .disconn_ind = &isdn_x25iface_disconn_ind
43997 };
43998
43999 /* error message helper function */
44000diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44001index 53d487f..cae33fe 100644
44002--- a/drivers/isdn/icn/icn.c
44003+++ b/drivers/isdn/icn/icn.c
44004@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44005 if (count > len)
44006 count = len;
44007 if (user) {
44008- if (copy_from_user(msg, buf, count))
44009+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44010 return -EFAULT;
44011 } else
44012 memcpy(msg, buf, count);
44013@@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card)
44014 ulong a;
44015 ulong flags;
44016 int i;
44017- char cbuf[60];
44018+ char cbuf[80];
44019 isdn_ctrl cmd;
44020 icn_cdef cdef;
44021 char __user *arg;
44022@@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card)
44023 break;
44024 if ((c->arg & 255) < ICN_BCH) {
44025 char *p;
44026- char dial[50];
44027 char dcode[4];
44028
44029 a = c->arg;
44030@@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
44031 } else
44032 /* Normal Dial */
44033 strcpy(dcode, "CAL");
44034- strcpy(dial, p);
44035- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
44036- dcode, dial, c->parm.setup.si1,
44037- c->parm.setup.si2, c->parm.setup.eazmsn);
44038+ snprintf(cbuf, sizeof(cbuf),
44039+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
44040+ dcode, p, c->parm.setup.si1,
44041+ c->parm.setup.si2, c->parm.setup.eazmsn);
44042 i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
44043 }
44044 break;
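icn_command() used to stage the dial string in a fixed 50-byte array and assemble the command with sprintf(); the patch drops the intermediate copy, grows cbuf to 80 bytes, and formats with snprintf() so the output can never run past the buffer. The general fix pattern (the argument names here are placeholders):

char cbuf[80];

/* snprintf() truncates rather than writing past the end. */
snprintf(cbuf, sizeof(cbuf), "%02d;D%s_R%s,%02d,%02d,%s\n",
	 channel + 1, dcode, number, si1, si2, eazmsn);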
44045diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44046index a4f05c5..1433bc5 100644
44047--- a/drivers/isdn/mISDN/dsp_cmx.c
44048+++ b/drivers/isdn/mISDN/dsp_cmx.c
44049@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44050 static u16 dsp_count; /* last sample count */
44051 static int dsp_count_valid; /* if we have last sample count */
44052
44053-void
44054+void __intentional_overflow(-1)
44055 dsp_cmx_send(void *arg)
44056 {
44057 struct dsp_conf *conf;
44058diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44059index d93e245..e7ece6b 100644
44060--- a/drivers/leds/leds-clevo-mail.c
44061+++ b/drivers/leds/leds-clevo-mail.c
44062@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44063 * detected as working, but in reality it is not) as low as
44064 * possible.
44065 */
44066-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44067+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44068 {
44069 .callback = clevo_mail_led_dmi_callback,
44070 .ident = "Clevo D410J",
44071diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44072index 5b8f938..b73d657 100644
44073--- a/drivers/leds/leds-ss4200.c
44074+++ b/drivers/leds/leds-ss4200.c
44075@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44076 * detected as working, but in reality it is not) as low as
44077 * possible.
44078 */
44079-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44080+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44081 {
44082 .callback = ss4200_led_dmi_callback,
44083 .ident = "Intel SS4200-E",
44084diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44085index 0bf1e4e..b4bf44e 100644
44086--- a/drivers/lguest/core.c
44087+++ b/drivers/lguest/core.c
44088@@ -97,9 +97,17 @@ static __init int map_switcher(void)
44089 * The end address needs +1 because __get_vm_area allocates an
44090 * extra guard page, so we need space for that.
44091 */
44092+
44093+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44094+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44095+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44096+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44097+#else
44098 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44099 VM_ALLOC, switcher_addr, switcher_addr
44100 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44101+#endif
44102+
44103 if (!switcher_vma) {
44104 err = -ENOMEM;
44105 printk("lguest: could not map switcher pages high\n");
44106@@ -124,7 +132,7 @@ static __init int map_switcher(void)
44107 * Now the Switcher is mapped at the right address, we can't fail!
44108 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44109 */
44110- memcpy(switcher_vma->addr, start_switcher_text,
44111+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44112 end_switcher_text - start_switcher_text);
44113
44114 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44115diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44116index bfb39bb..08a603b 100644
44117--- a/drivers/lguest/page_tables.c
44118+++ b/drivers/lguest/page_tables.c
44119@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44120 /*:*/
44121
44122 #ifdef CONFIG_X86_PAE
44123-static void release_pmd(pmd_t *spmd)
44124+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44125 {
44126 /* If the entry's not present, there's nothing to release. */
44127 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44128diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44129index 922a1ac..9dd0c2a 100644
44130--- a/drivers/lguest/x86/core.c
44131+++ b/drivers/lguest/x86/core.c
44132@@ -59,7 +59,7 @@ static struct {
44133 /* Offset from where switcher.S was compiled to where we've copied it */
44134 static unsigned long switcher_offset(void)
44135 {
44136- return switcher_addr - (unsigned long)start_switcher_text;
44137+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44138 }
44139
44140 /* This cpu's struct lguest_pages (after the Switcher text page) */
44141@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44142 * These copies are pretty cheap, so we do them unconditionally: */
44143 /* Save the current Host top-level page directory.
44144 */
44145+
44146+#ifdef CONFIG_PAX_PER_CPU_PGD
44147+ pages->state.host_cr3 = read_cr3();
44148+#else
44149 pages->state.host_cr3 = __pa(current->mm->pgd);
44150+#endif
44151+
44152 /*
44153 * Set up the Guest's page tables to see this CPU's pages (and no
44154 * other CPU's pages).
44155@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44156 * compiled-in switcher code and the high-mapped copy we just made.
44157 */
44158 for (i = 0; i < IDT_ENTRIES; i++)
44159- default_idt_entries[i] += switcher_offset();
44160+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44161
44162 /*
44163 * Set up the Switcher's per-cpu areas.
44164@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44165 * it will be undisturbed when we switch. To change %cs and jump we
44166 * need this structure to feed to Intel's "lcall" instruction.
44167 */
44168- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44169+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44170 lguest_entry.segment = LGUEST_CS;
44171
44172 /*
44173diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44174index 40634b0..4f5855e 100644
44175--- a/drivers/lguest/x86/switcher_32.S
44176+++ b/drivers/lguest/x86/switcher_32.S
44177@@ -87,6 +87,7 @@
44178 #include <asm/page.h>
44179 #include <asm/segment.h>
44180 #include <asm/lguest.h>
44181+#include <asm/processor-flags.h>
44182
44183 // We mark the start of the code to copy
44184 // It's placed in .text tho it's never run here
44185@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44186 // Changes type when we load it: damn Intel!
44187 // For after we switch over our page tables
44188 // That entry will be read-only: we'd crash.
44189+
44190+#ifdef CONFIG_PAX_KERNEXEC
44191+ mov %cr0, %edx
44192+ xor $X86_CR0_WP, %edx
44193+ mov %edx, %cr0
44194+#endif
44195+
44196 movl $(GDT_ENTRY_TSS*8), %edx
44197 ltr %dx
44198
44199@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44200 // Let's clear it again for our return.
44201 // The GDT descriptor of the Host
44202 // Points to the table after two "size" bytes
44203- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44204+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44205 // Clear "used" from type field (byte 5, bit 2)
44206- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44207+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44208+
44209+#ifdef CONFIG_PAX_KERNEXEC
44210+ mov %cr0, %eax
44211+ xor $X86_CR0_WP, %eax
44212+ mov %eax, %cr0
44213+#endif
44214
44215 // Once our page table's switched, the Guest is live!
44216 // The Host fades as we run this final step.
44217@@ -295,13 +309,12 @@ deliver_to_host:
44218 // I consulted gcc, and it gave
44219 // These instructions, which I gladly credit:
44220 leal (%edx,%ebx,8), %eax
44221- movzwl (%eax),%edx
44222- movl 4(%eax), %eax
44223- xorw %ax, %ax
44224- orl %eax, %edx
44225+ movl 4(%eax), %edx
44226+ movw (%eax), %dx
44227 // Now the address of the handler's in %edx
44228 // We call it now: its "iret" drops us home.
44229- jmp *%edx
44230+ ljmp $__KERNEL_CS, $1f
44231+1: jmp *%edx
44232
44233 // Every interrupt can come to us here
44234 // But we must truly tell each apart.
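Under KERNEXEC the TSS descriptor the switcher patches lives in read-only memory, so the assembly above temporarily flips CR0.WP around the write and restores it afterwards. A C-level sketch of the same dance, assuming the usual x86 read_cr0()/write_cr0() accessors:

unsigned long cr0 = read_cr0();

write_cr0(cr0 & ~X86_CR0_WP);	/* allow stores to read-only pages */
/* ... patch the read-only descriptor ... */
write_cr0(cr0);			/* restore write protection */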
44235diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44236index 7ef7461..5a09dac 100644
44237--- a/drivers/md/bcache/closure.h
44238+++ b/drivers/md/bcache/closure.h
44239@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44240 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44241 struct workqueue_struct *wq)
44242 {
44243- BUG_ON(object_is_on_stack(cl));
44244+ BUG_ON(object_starts_on_stack(cl));
44245 closure_set_ip(cl);
44246 cl->fn = fn;
44247 cl->wq = wq;
44248diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44249index 4195a01..42527ac 100644
44250--- a/drivers/md/bitmap.c
44251+++ b/drivers/md/bitmap.c
44252@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44253 chunk_kb ? "KB" : "B");
44254 if (bitmap->storage.file) {
44255 seq_printf(seq, ", file: ");
44256- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44257+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44258 }
44259
44260 seq_printf(seq, "\n");
44261diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44262index 5152142..623d141 100644
44263--- a/drivers/md/dm-ioctl.c
44264+++ b/drivers/md/dm-ioctl.c
44265@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44266 cmd == DM_LIST_VERSIONS_CMD)
44267 return 0;
44268
44269- if ((cmd == DM_DEV_CREATE_CMD)) {
44270+ if (cmd == DM_DEV_CREATE_CMD) {
44271 if (!*param->name) {
44272 DMWARN("name not supplied when creating device");
44273 return -EINVAL;
44274diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44275index 7dfdb5c..4caada6 100644
44276--- a/drivers/md/dm-raid1.c
44277+++ b/drivers/md/dm-raid1.c
44278@@ -40,7 +40,7 @@ enum dm_raid1_error {
44279
44280 struct mirror {
44281 struct mirror_set *ms;
44282- atomic_t error_count;
44283+ atomic_unchecked_t error_count;
44284 unsigned long error_type;
44285 struct dm_dev *dev;
44286 sector_t offset;
44287@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44288 struct mirror *m;
44289
44290 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44291- if (!atomic_read(&m->error_count))
44292+ if (!atomic_read_unchecked(&m->error_count))
44293 return m;
44294
44295 return NULL;
44296@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44297 * simple way to tell if a device has encountered
44298 * errors.
44299 */
44300- atomic_inc(&m->error_count);
44301+ atomic_inc_unchecked(&m->error_count);
44302
44303 if (test_and_set_bit(error_type, &m->error_type))
44304 return;
44305@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44306 struct mirror *m = get_default_mirror(ms);
44307
44308 do {
44309- if (likely(!atomic_read(&m->error_count)))
44310+ if (likely(!atomic_read_unchecked(&m->error_count)))
44311 return m;
44312
44313 if (m-- == ms->mirror)
44314@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44315 {
44316 struct mirror *default_mirror = get_default_mirror(m->ms);
44317
44318- return !atomic_read(&default_mirror->error_count);
44319+ return !atomic_read_unchecked(&default_mirror->error_count);
44320 }
44321
44322 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44323@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44324 */
44325 if (likely(region_in_sync(ms, region, 1)))
44326 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44327- else if (m && atomic_read(&m->error_count))
44328+ else if (m && atomic_read_unchecked(&m->error_count))
44329 m = NULL;
44330
44331 if (likely(m))
44332@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44333 }
44334
44335 ms->mirror[mirror].ms = ms;
44336- atomic_set(&(ms->mirror[mirror].error_count), 0);
44337+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44338 ms->mirror[mirror].error_type = 0;
44339 ms->mirror[mirror].offset = offset;
44340
44341@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
44342 */
44343 static char device_status_char(struct mirror *m)
44344 {
44345- if (!atomic_read(&(m->error_count)))
44346+ if (!atomic_read_unchecked(&(m->error_count)))
44347 return 'A';
44348
44349 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44350diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44351index 28a9012..9c0f6a5 100644
44352--- a/drivers/md/dm-stats.c
44353+++ b/drivers/md/dm-stats.c
44354@@ -382,7 +382,7 @@ do_sync_free:
44355 synchronize_rcu_expedited();
44356 dm_stat_free(&s->rcu_head);
44357 } else {
44358- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44359+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44360 call_rcu(&s->rcu_head, dm_stat_free);
44361 }
44362 return 0;
44363@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44364 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44365 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44366 ));
44367- ACCESS_ONCE(last->last_sector) = end_sector;
44368- ACCESS_ONCE(last->last_rw) = bi_rw;
44369+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44370+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44371 }
44372
44373 rcu_read_lock();
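grsecurity const-qualifies plain ACCESS_ONCE() so that accidental writes through it fail to compile; deliberate writes, as in the dm-stats hunks above, must be spelled ACCESS_ONCE_RW(). Sketchily, assuming the grsecurity macro definitions (shared_flag is illustrative):

static int shared_flag;

static void demo(void)
{
	int v = ACCESS_ONCE(shared_flag);	/* reads keep the plain macro */

	ACCESS_ONCE_RW(shared_flag) = v + 1;	/* writes need the RW variant */
}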
44374diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44375index d1600d2..4c3af3a 100644
44376--- a/drivers/md/dm-stripe.c
44377+++ b/drivers/md/dm-stripe.c
44378@@ -21,7 +21,7 @@ struct stripe {
44379 struct dm_dev *dev;
44380 sector_t physical_start;
44381
44382- atomic_t error_count;
44383+ atomic_unchecked_t error_count;
44384 };
44385
44386 struct stripe_c {
44387@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44388 kfree(sc);
44389 return r;
44390 }
44391- atomic_set(&(sc->stripe[i].error_count), 0);
44392+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44393 }
44394
44395 ti->private = sc;
44396@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44397 DMEMIT("%d ", sc->stripes);
44398 for (i = 0; i < sc->stripes; i++) {
44399 DMEMIT("%s ", sc->stripe[i].dev->name);
44400- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44401+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44402 'D' : 'A';
44403 }
44404 buffer[i] = '\0';
44405@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44406 */
44407 for (i = 0; i < sc->stripes; i++)
44408 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44409- atomic_inc(&(sc->stripe[i].error_count));
44410- if (atomic_read(&(sc->stripe[i].error_count)) <
44411+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44412+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44413 DM_IO_ERROR_THRESHOLD)
44414 schedule_work(&sc->trigger_event);
44415 }
44416diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44417index 6a7f2b8..fea0bde 100644
44418--- a/drivers/md/dm-table.c
44419+++ b/drivers/md/dm-table.c
44420@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
44421 static int open_dev(struct dm_dev_internal *d, dev_t dev,
44422 struct mapped_device *md)
44423 {
44424- static char *_claim_ptr = "I belong to device-mapper";
44425+ static char _claim_ptr[] = "I belong to device-mapper";
44426 struct block_device *bdev;
44427
44428 int r;
44429@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44430 if (!dev_size)
44431 return 0;
44432
44433- if ((start >= dev_size) || (start + len > dev_size)) {
44434+ if ((start >= dev_size) || (len > dev_size - start)) {
44435 DMWARN("%s: %s too small for target: "
44436 "start=%llu, len=%llu, dev_size=%llu",
44437 dm_device_name(ti->table->md), bdevname(bdev, b),
44438diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44439index b086a94..74cb67e 100644
44440--- a/drivers/md/dm-thin-metadata.c
44441+++ b/drivers/md/dm-thin-metadata.c
44442@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44443 {
44444 pmd->info.tm = pmd->tm;
44445 pmd->info.levels = 2;
44446- pmd->info.value_type.context = pmd->data_sm;
44447+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44448 pmd->info.value_type.size = sizeof(__le64);
44449 pmd->info.value_type.inc = data_block_inc;
44450 pmd->info.value_type.dec = data_block_dec;
44451@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44452
44453 pmd->bl_info.tm = pmd->tm;
44454 pmd->bl_info.levels = 1;
44455- pmd->bl_info.value_type.context = pmd->data_sm;
44456+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44457 pmd->bl_info.value_type.size = sizeof(__le64);
44458 pmd->bl_info.value_type.inc = data_block_inc;
44459 pmd->bl_info.value_type.dec = data_block_dec;
44460diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44461index 8c53b09..f1fb2b0 100644
44462--- a/drivers/md/dm.c
44463+++ b/drivers/md/dm.c
44464@@ -185,9 +185,9 @@ struct mapped_device {
44465 /*
44466 * Event handling.
44467 */
44468- atomic_t event_nr;
44469+ atomic_unchecked_t event_nr;
44470 wait_queue_head_t eventq;
44471- atomic_t uevent_seq;
44472+ atomic_unchecked_t uevent_seq;
44473 struct list_head uevent_list;
44474 spinlock_t uevent_lock; /* Protect access to uevent_list */
44475
44476@@ -1888,8 +1888,8 @@ static struct mapped_device *alloc_dev(int minor)
44477 spin_lock_init(&md->deferred_lock);
44478 atomic_set(&md->holders, 1);
44479 atomic_set(&md->open_count, 0);
44480- atomic_set(&md->event_nr, 0);
44481- atomic_set(&md->uevent_seq, 0);
44482+ atomic_set_unchecked(&md->event_nr, 0);
44483+ atomic_set_unchecked(&md->uevent_seq, 0);
44484 INIT_LIST_HEAD(&md->uevent_list);
44485 spin_lock_init(&md->uevent_lock);
44486
44487@@ -2043,7 +2043,7 @@ static void event_callback(void *context)
44488
44489 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44490
44491- atomic_inc(&md->event_nr);
44492+ atomic_inc_unchecked(&md->event_nr);
44493 wake_up(&md->eventq);
44494 }
44495
44496@@ -2736,18 +2736,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44497
44498 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44499 {
44500- return atomic_add_return(1, &md->uevent_seq);
44501+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44502 }
44503
44504 uint32_t dm_get_event_nr(struct mapped_device *md)
44505 {
44506- return atomic_read(&md->event_nr);
44507+ return atomic_read_unchecked(&md->event_nr);
44508 }
44509
44510 int dm_wait_event(struct mapped_device *md, int event_nr)
44511 {
44512 return wait_event_interruptible(md->eventq,
44513- (event_nr != atomic_read(&md->event_nr)));
44514+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44515 }
44516
44517 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44518diff --git a/drivers/md/md.c b/drivers/md/md.c
44519index 51c431c..be0fbd6 100644
44520--- a/drivers/md/md.c
44521+++ b/drivers/md/md.c
44522@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44523 * start build, activate spare
44524 */
44525 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44526-static atomic_t md_event_count;
44527+static atomic_unchecked_t md_event_count;
44528 void md_new_event(struct mddev *mddev)
44529 {
44530- atomic_inc(&md_event_count);
44531+ atomic_inc_unchecked(&md_event_count);
44532 wake_up(&md_event_waiters);
44533 }
44534 EXPORT_SYMBOL_GPL(md_new_event);
44535@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44536 */
44537 static void md_new_event_inintr(struct mddev *mddev)
44538 {
44539- atomic_inc(&md_event_count);
44540+ atomic_inc_unchecked(&md_event_count);
44541 wake_up(&md_event_waiters);
44542 }
44543
44544@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44545 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44546 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44547 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44548- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44549+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44550
44551 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44552 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44553@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44554 else
44555 sb->resync_offset = cpu_to_le64(0);
44556
44557- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44558+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44559
44560 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44561 sb->size = cpu_to_le64(mddev->dev_sectors);
44562@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44563 static ssize_t
44564 errors_show(struct md_rdev *rdev, char *page)
44565 {
44566- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44567+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44568 }
44569
44570 static ssize_t
44571@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44572 char *e;
44573 unsigned long n = simple_strtoul(buf, &e, 10);
44574 if (*buf && (*e == 0 || *e == '\n')) {
44575- atomic_set(&rdev->corrected_errors, n);
44576+ atomic_set_unchecked(&rdev->corrected_errors, n);
44577 return len;
44578 }
44579 return -EINVAL;
44580@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
44581 rdev->sb_loaded = 0;
44582 rdev->bb_page = NULL;
44583 atomic_set(&rdev->nr_pending, 0);
44584- atomic_set(&rdev->read_errors, 0);
44585- atomic_set(&rdev->corrected_errors, 0);
44586+ atomic_set_unchecked(&rdev->read_errors, 0);
44587+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44588
44589 INIT_LIST_HEAD(&rdev->same_set);
44590 init_waitqueue_head(&rdev->blocked_wait);
44591@@ -7075,7 +7075,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44592
44593 spin_unlock(&pers_lock);
44594 seq_printf(seq, "\n");
44595- seq->poll_event = atomic_read(&md_event_count);
44596+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44597 return 0;
44598 }
44599 if (v == (void*)2) {
44600@@ -7178,7 +7178,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44601 return error;
44602
44603 seq = file->private_data;
44604- seq->poll_event = atomic_read(&md_event_count);
44605+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44606 return error;
44607 }
44608
44609@@ -7192,7 +7192,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44610 /* always allow read */
44611 mask = POLLIN | POLLRDNORM;
44612
44613- if (seq->poll_event != atomic_read(&md_event_count))
44614+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44615 mask |= POLLERR | POLLPRI;
44616 return mask;
44617 }
44618@@ -7236,7 +7236,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44619 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44620 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44621 (int)part_stat_read(&disk->part0, sectors[1]) -
44622- atomic_read(&disk->sync_io);
44623+ atomic_read_unchecked(&disk->sync_io);
44624 /* sync IO will cause sync_io to increase before the disk_stats
44625 * as sync_io is counted when a request starts, and
44626 * disk_stats is counted when it completes.
44627diff --git a/drivers/md/md.h b/drivers/md/md.h
44628index 07bba96..2d6788c 100644
44629--- a/drivers/md/md.h
44630+++ b/drivers/md/md.h
44631@@ -94,13 +94,13 @@ struct md_rdev {
44632 * only maintained for arrays that
44633 * support hot removal
44634 */
44635- atomic_t read_errors; /* number of consecutive read errors that
44636+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44637 * we have tried to ignore.
44638 */
44639 struct timespec last_read_error; /* monotonic time since our
44640 * last read error
44641 */
44642- atomic_t corrected_errors; /* number of corrected read errors,
44643+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44644 * for reporting to userspace and storing
44645 * in superblock.
44646 */
44647@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44648
44649 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44650 {
44651- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44652+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44653 }
44654
44655 struct md_personality
44656diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44657index 786b689..ea8c956 100644
44658--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44659+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44660@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44661 * Flick into a mode where all blocks get allocated in the new area.
44662 */
44663 smm->begin = old_len;
44664- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44665+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44666
44667 /*
44668 * Extend.
44669@@ -710,7 +710,7 @@ out:
44670 /*
44671 * Switch back to normal behaviour.
44672 */
44673- memcpy(sm, &ops, sizeof(*sm));
44674+ memcpy((void *)sm, &ops, sizeof(*sm));
44675 return r;
44676 }
44677
44678diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44679index 3e6d115..ffecdeb 100644
44680--- a/drivers/md/persistent-data/dm-space-map.h
44681+++ b/drivers/md/persistent-data/dm-space-map.h
44682@@ -71,6 +71,7 @@ struct dm_space_map {
44683 dm_sm_threshold_fn fn,
44684 void *context);
44685 };
44686+typedef struct dm_space_map __no_const dm_space_map_no_const;
44687
44688 /*----------------------------------------------------------------*/
44689
44690diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44691index 56e24c0..e1c8e1f 100644
44692--- a/drivers/md/raid1.c
44693+++ b/drivers/md/raid1.c
44694@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44695 if (r1_sync_page_io(rdev, sect, s,
44696 bio->bi_io_vec[idx].bv_page,
44697 READ) != 0)
44698- atomic_add(s, &rdev->corrected_errors);
44699+ atomic_add_unchecked(s, &rdev->corrected_errors);
44700 }
44701 sectors -= s;
44702 sect += s;
44703@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44704 test_bit(In_sync, &rdev->flags)) {
44705 if (r1_sync_page_io(rdev, sect, s,
44706 conf->tmppage, READ)) {
44707- atomic_add(s, &rdev->corrected_errors);
44708+ atomic_add_unchecked(s, &rdev->corrected_errors);
44709 printk(KERN_INFO
44710 "md/raid1:%s: read error corrected "
44711 "(%d sectors at %llu on %s)\n",
44712diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44713index cb882aa..9bd076e 100644
44714--- a/drivers/md/raid10.c
44715+++ b/drivers/md/raid10.c
44716@@ -1949,7 +1949,7 @@ static void end_sync_read(struct bio *bio, int error)
44717 /* The write handler will notice the lack of
44718 * R10BIO_Uptodate and record any errors etc
44719 */
44720- atomic_add(r10_bio->sectors,
44721+ atomic_add_unchecked(r10_bio->sectors,
44722 &conf->mirrors[d].rdev->corrected_errors);
44723
44724 /* for reconstruct, we always reschedule after a read.
44725@@ -2307,7 +2307,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44726 {
44727 struct timespec cur_time_mon;
44728 unsigned long hours_since_last;
44729- unsigned int read_errors = atomic_read(&rdev->read_errors);
44730+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44731
44732 ktime_get_ts(&cur_time_mon);
44733
44734@@ -2329,9 +2329,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44735 * overflowing the shift of read_errors by hours_since_last.
44736 */
44737 if (hours_since_last >= 8 * sizeof(read_errors))
44738- atomic_set(&rdev->read_errors, 0);
44739+ atomic_set_unchecked(&rdev->read_errors, 0);
44740 else
44741- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44742+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44743 }
44744
44745 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44746@@ -2385,8 +2385,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44747 return;
44748
44749 check_decay_read_errors(mddev, rdev);
44750- atomic_inc(&rdev->read_errors);
44751- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44752+ atomic_inc_unchecked(&rdev->read_errors);
44753+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44754 char b[BDEVNAME_SIZE];
44755 bdevname(rdev->bdev, b);
44756
44757@@ -2394,7 +2394,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44758 "md/raid10:%s: %s: Raid device exceeded "
44759 "read_error threshold [cur %d:max %d]\n",
44760 mdname(mddev), b,
44761- atomic_read(&rdev->read_errors), max_read_errors);
44762+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44763 printk(KERN_NOTICE
44764 "md/raid10:%s: %s: Failing raid device\n",
44765 mdname(mddev), b);
44766@@ -2549,7 +2549,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44767 sect +
44768 choose_data_offset(r10_bio, rdev)),
44769 bdevname(rdev->bdev, b));
44770- atomic_add(s, &rdev->corrected_errors);
44771+ atomic_add_unchecked(s, &rdev->corrected_errors);
44772 }
44773
44774 rdev_dec_pending(rdev, mddev);
44775diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44776index 16f5c21..4df20dc 100644
44777--- a/drivers/md/raid5.c
44778+++ b/drivers/md/raid5.c
44779@@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44780 mdname(conf->mddev), STRIPE_SECTORS,
44781 (unsigned long long)s,
44782 bdevname(rdev->bdev, b));
44783- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44784+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44785 clear_bit(R5_ReadError, &sh->dev[i].flags);
44786 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44787 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44788 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44789
44790- if (atomic_read(&rdev->read_errors))
44791- atomic_set(&rdev->read_errors, 0);
44792+ if (atomic_read_unchecked(&rdev->read_errors))
44793+ atomic_set_unchecked(&rdev->read_errors, 0);
44794 } else {
44795 const char *bdn = bdevname(rdev->bdev, b);
44796 int retry = 0;
44797 int set_bad = 0;
44798
44799 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44800- atomic_inc(&rdev->read_errors);
44801+ atomic_inc_unchecked(&rdev->read_errors);
44802 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44803 printk_ratelimited(
44804 KERN_WARNING
44805@@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44806 mdname(conf->mddev),
44807 (unsigned long long)s,
44808 bdn);
44809- } else if (atomic_read(&rdev->read_errors)
44810+ } else if (atomic_read_unchecked(&rdev->read_errors)
44811 > conf->max_nr_stripes)
44812 printk(KERN_WARNING
44813 "md/raid:%s: Too many read errors, failing device %s.\n",
44814diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44815index 983db75..ef9248c 100644
44816--- a/drivers/media/dvb-core/dvbdev.c
44817+++ b/drivers/media/dvb-core/dvbdev.c
44818@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44819 const struct dvb_device *template, void *priv, int type)
44820 {
44821 struct dvb_device *dvbdev;
44822- struct file_operations *dvbdevfops;
44823+ file_operations_no_const *dvbdevfops;
44824 struct device *clsdev;
44825 int minor;
44826 int id;
44827diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44828index 9b6c3bb..baeb5c7 100644
44829--- a/drivers/media/dvb-frontends/dib3000.h
44830+++ b/drivers/media/dvb-frontends/dib3000.h
44831@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44832 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44833 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44834 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44835-};
44836+} __no_const;
44837
44838 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44839 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44840diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44841index ed8cb90..5ef7f79 100644
44842--- a/drivers/media/pci/cx88/cx88-video.c
44843+++ b/drivers/media/pci/cx88/cx88-video.c
44844@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44845
44846 /* ------------------------------------------------------------------ */
44847
44848-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44849-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44850-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44851+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44852+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44853+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44854
44855 module_param_array(video_nr, int, NULL, 0444);
44856 module_param_array(vbi_nr, int, NULL, 0444);
44857diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44858index 802642d..5534900 100644
44859--- a/drivers/media/pci/ivtv/ivtv-driver.c
44860+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44861@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44862 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44863
44864 /* ivtv instance counter */
44865-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44866+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44867
44868 /* Parameter declarations */
44869 static int cardtype[IVTV_MAX_CARDS];
44870diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44871index dfd0a21..6bbb465 100644
44872--- a/drivers/media/platform/omap/omap_vout.c
44873+++ b/drivers/media/platform/omap/omap_vout.c
44874@@ -63,7 +63,6 @@ enum omap_vout_channels {
44875 OMAP_VIDEO2,
44876 };
44877
44878-static struct videobuf_queue_ops video_vbq_ops;
44879 /* Variables configurable through module params*/
44880 static u32 video1_numbuffers = 3;
44881 static u32 video2_numbuffers = 3;
44882@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
44883 {
44884 struct videobuf_queue *q;
44885 struct omap_vout_device *vout = NULL;
44886+ static struct videobuf_queue_ops video_vbq_ops = {
44887+ .buf_setup = omap_vout_buffer_setup,
44888+ .buf_prepare = omap_vout_buffer_prepare,
44889+ .buf_release = omap_vout_buffer_release,
44890+ .buf_queue = omap_vout_buffer_queue,
44891+ };
44892
44893 vout = video_drvdata(file);
44894 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44895@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
44896 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44897
44898 q = &vout->vbq;
44899- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44900- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44901- video_vbq_ops.buf_release = omap_vout_buffer_release;
44902- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44903 spin_lock_init(&vout->vbq_lock);
44904
44905 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
44906diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44907index fb2acc5..a2fcbdc4 100644
44908--- a/drivers/media/platform/s5p-tv/mixer.h
44909+++ b/drivers/media/platform/s5p-tv/mixer.h
44910@@ -156,7 +156,7 @@ struct mxr_layer {
44911 /** layer index (unique identifier) */
44912 int idx;
44913 /** callbacks for layer methods */
44914- struct mxr_layer_ops ops;
44915+ struct mxr_layer_ops *ops;
44916 /** format array */
44917 const struct mxr_format **fmt_array;
44918 /** size of format array */
44919diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44920index 74344c7..a39e70e 100644
44921--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44922+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44923@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44924 {
44925 struct mxr_layer *layer;
44926 int ret;
44927- struct mxr_layer_ops ops = {
44928+ static struct mxr_layer_ops ops = {
44929 .release = mxr_graph_layer_release,
44930 .buffer_set = mxr_graph_buffer_set,
44931 .stream_set = mxr_graph_stream_set,
44932diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44933index b713403..53cb5ad 100644
44934--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44935+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44936@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44937 layer->update_buf = next;
44938 }
44939
44940- layer->ops.buffer_set(layer, layer->update_buf);
44941+ layer->ops->buffer_set(layer, layer->update_buf);
44942
44943 if (done && done != layer->shadow_buf)
44944 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44945diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44946index c5059ba..2649f28 100644
44947--- a/drivers/media/platform/s5p-tv/mixer_video.c
44948+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44949@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44950 layer->geo.src.height = layer->geo.src.full_height;
44951
44952 mxr_geometry_dump(mdev, &layer->geo);
44953- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44954+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44955 mxr_geometry_dump(mdev, &layer->geo);
44956 }
44957
44958@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44959 layer->geo.dst.full_width = mbus_fmt.width;
44960 layer->geo.dst.full_height = mbus_fmt.height;
44961 layer->geo.dst.field = mbus_fmt.field;
44962- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44963+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44964
44965 mxr_geometry_dump(mdev, &layer->geo);
44966 }
44967@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44968 /* set source size to highest accepted value */
44969 geo->src.full_width = max(geo->dst.full_width, pix->width);
44970 geo->src.full_height = max(geo->dst.full_height, pix->height);
44971- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44972+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44973 mxr_geometry_dump(mdev, &layer->geo);
44974 /* set cropping to total visible screen */
44975 geo->src.width = pix->width;
44976@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44977 geo->src.x_offset = 0;
44978 geo->src.y_offset = 0;
44979 /* assure consistency of geometry */
44980- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44981+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44982 mxr_geometry_dump(mdev, &layer->geo);
44983 /* set full size to lowest possible value */
44984 geo->src.full_width = 0;
44985 geo->src.full_height = 0;
44986- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44987+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44988 mxr_geometry_dump(mdev, &layer->geo);
44989
44990 /* returning results */
44991@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44992 target->width = s->r.width;
44993 target->height = s->r.height;
44994
44995- layer->ops.fix_geometry(layer, stage, s->flags);
44996+ layer->ops->fix_geometry(layer, stage, s->flags);
44997
44998 /* retrieve update selection rectangle */
44999 res.left = target->x_offset;
45000@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45001 mxr_output_get(mdev);
45002
45003 mxr_layer_update_output(layer);
45004- layer->ops.format_set(layer);
45005+ layer->ops->format_set(layer);
45006 /* enabling layer in hardware */
45007 spin_lock_irqsave(&layer->enq_slock, flags);
45008 layer->state = MXR_LAYER_STREAMING;
45009 spin_unlock_irqrestore(&layer->enq_slock, flags);
45010
45011- layer->ops.stream_set(layer, MXR_ENABLE);
45012+ layer->ops->stream_set(layer, MXR_ENABLE);
45013 mxr_streamer_get(mdev);
45014
45015 return 0;
45016@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
45017 spin_unlock_irqrestore(&layer->enq_slock, flags);
45018
45019 /* disabling layer in hardware */
45020- layer->ops.stream_set(layer, MXR_DISABLE);
45021+ layer->ops->stream_set(layer, MXR_DISABLE);
45022 /* remove one streamer */
45023 mxr_streamer_put(mdev);
45024 /* allow changes in output configuration */
45025@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45026
45027 void mxr_layer_release(struct mxr_layer *layer)
45028 {
45029- if (layer->ops.release)
45030- layer->ops.release(layer);
45031+ if (layer->ops->release)
45032+ layer->ops->release(layer);
45033 }
45034
45035 void mxr_base_layer_release(struct mxr_layer *layer)
45036@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45037
45038 layer->mdev = mdev;
45039 layer->idx = idx;
45040- layer->ops = *ops;
45041+ layer->ops = ops;
45042
45043 spin_lock_init(&layer->enq_slock);
45044 INIT_LIST_HEAD(&layer->enq_list);
45045diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45046index c9388c4..ce71ece 100644
45047--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45048+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45049@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45050 {
45051 struct mxr_layer *layer;
45052 int ret;
45053- struct mxr_layer_ops ops = {
45054+ static struct mxr_layer_ops ops = {
45055 .release = mxr_vp_layer_release,
45056 .buffer_set = mxr_vp_buffer_set,
45057 .stream_set = mxr_vp_stream_set,
45058diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
45059index 2d4e73b..8b4d5b6 100644
45060--- a/drivers/media/platform/vivi.c
45061+++ b/drivers/media/platform/vivi.c
45062@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
45063 MODULE_LICENSE("Dual BSD/GPL");
45064 MODULE_VERSION(VIVI_VERSION);
45065
45066-static unsigned video_nr = -1;
45067-module_param(video_nr, uint, 0644);
45068+static int video_nr = -1;
45069+module_param(video_nr, int, 0644);
45070 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
45071
45072 static unsigned n_devs = 1;
45073diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45074index 545c04c..a14bded 100644
45075--- a/drivers/media/radio/radio-cadet.c
45076+++ b/drivers/media/radio/radio-cadet.c
45077@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45078 unsigned char readbuf[RDS_BUFFER];
45079 int i = 0;
45080
45081+ if (count > RDS_BUFFER)
45082+ return -EFAULT;
45083 mutex_lock(&dev->lock);
45084 if (dev->rdsstat == 0)
45085 cadet_start_rds(dev);
45086@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45087 while (i < count && dev->rdsin != dev->rdsout)
45088 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45089
45090- if (i && copy_to_user(data, readbuf, i))
45091+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
45092 i = -EFAULT;
45093 unlock:
45094 mutex_unlock(&dev->lock);
45095diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45096index 5236035..c622c74 100644
45097--- a/drivers/media/radio/radio-maxiradio.c
45098+++ b/drivers/media/radio/radio-maxiradio.c
45099@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45100 /* TEA5757 pin mappings */
45101 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45102
45103-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45104+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45105
45106 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45107 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45108diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45109index 050b3bb..79f62b9 100644
45110--- a/drivers/media/radio/radio-shark.c
45111+++ b/drivers/media/radio/radio-shark.c
45112@@ -79,7 +79,7 @@ struct shark_device {
45113 u32 last_val;
45114 };
45115
45116-static atomic_t shark_instance = ATOMIC_INIT(0);
45117+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45118
45119 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45120 {
45121diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45122index 8654e0d..0608a64 100644
45123--- a/drivers/media/radio/radio-shark2.c
45124+++ b/drivers/media/radio/radio-shark2.c
45125@@ -74,7 +74,7 @@ struct shark_device {
45126 u8 *transfer_buffer;
45127 };
45128
45129-static atomic_t shark_instance = ATOMIC_INIT(0);
45130+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45131
45132 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45133 {
45134diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45135index 2fd9009..278cc1e 100644
45136--- a/drivers/media/radio/radio-si476x.c
45137+++ b/drivers/media/radio/radio-si476x.c
45138@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45139 struct si476x_radio *radio;
45140 struct v4l2_ctrl *ctrl;
45141
45142- static atomic_t instance = ATOMIC_INIT(0);
45143+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45144
45145 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45146 if (!radio)
45147diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
45148index a1c641e..3007da9 100644
45149--- a/drivers/media/usb/dvb-usb/cxusb.c
45150+++ b/drivers/media/usb/dvb-usb/cxusb.c
45151@@ -1112,7 +1112,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
45152
45153 struct dib0700_adapter_state {
45154 int (*set_param_save) (struct dvb_frontend *);
45155-};
45156+} __no_const;
45157
45158 static int dib7070_set_param_override(struct dvb_frontend *fe)
45159 {
45160diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45161index ae0f56a..ec71784 100644
45162--- a/drivers/media/usb/dvb-usb/dw2102.c
45163+++ b/drivers/media/usb/dvb-usb/dw2102.c
45164@@ -118,7 +118,7 @@ struct su3000_state {
45165
45166 struct s6x0_state {
45167 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45168-};
45169+} __no_const;
45170
45171 /* debug */
45172 static int dvb_usb_dw2102_debug;
45173diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45174index fca336b..fb70ab7 100644
45175--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45176+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
45177@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
45178 __u32 reserved;
45179 };
45180
45181-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
45182+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
45183 enum v4l2_memory memory)
45184 {
45185 void __user *up_pln;
45186@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
45187 return 0;
45188 }
45189
45190-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
45191+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
45192 enum v4l2_memory memory)
45193 {
45194 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
45195@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45196 * by passing a very big num_planes value */
45197 uplane = compat_alloc_user_space(num_planes *
45198 sizeof(struct v4l2_plane));
45199- kp->m.planes = uplane;
45200+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
45201
45202 while (--num_planes >= 0) {
45203 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
45204@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
45205 if (num_planes == 0)
45206 return 0;
45207
45208- uplane = kp->m.planes;
45209+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
45210 if (get_user(p, &up->m.planes))
45211 return -EFAULT;
45212 uplane32 = compat_ptr(p);
45213@@ -552,7 +552,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
45214 get_user(kp->capability, &up->capability) ||
45215 get_user(kp->flags, &up->flags))
45216 return -EFAULT;
45217- kp->base = compat_ptr(tmp);
45218+ kp->base = (void __force_kernel *)compat_ptr(tmp);
45219 get_v4l2_pix_format(&kp->fmt, &up->fmt);
45220 return 0;
45221 }
45222@@ -658,7 +658,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45223 n * sizeof(struct v4l2_ext_control32)))
45224 return -EFAULT;
45225 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
45226- kp->controls = kcontrols;
45227+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
45228 while (--n >= 0) {
45229 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
45230 return -EFAULT;
45231@@ -680,7 +680,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
45232 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
45233 {
45234 struct v4l2_ext_control32 __user *ucontrols;
45235- struct v4l2_ext_control __user *kcontrols = kp->controls;
45236+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
45237 int n = kp->count;
45238 compat_caddr_t p;
45239
45240@@ -774,7 +774,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
45241 put_user(kp->start_block, &up->start_block) ||
45242 put_user(kp->blocks, &up->blocks) ||
45243 put_user(tmp, &up->edid) ||
45244- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
45245+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
45246 return -EFAULT;
45247 return 0;
45248 }
45249diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
45250index 6ff002b..6b9316b 100644
45251--- a/drivers/media/v4l2-core/v4l2-ctrls.c
45252+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
45253@@ -1401,8 +1401,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
45254 return 0;
45255
45256 case V4L2_CTRL_TYPE_STRING:
45257- len = strlen(c->string);
45258- if (len < ctrl->minimum)
45259+ len = strlen_user(c->string);
45260+ if (!len || len < ctrl->minimum)
45261 return -ERANGE;
45262 if ((len - ctrl->minimum) % ctrl->step)
45263 return -ERANGE;
45264diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
45265index 02d1b63..5fd6b16 100644
45266--- a/drivers/media/v4l2-core/v4l2-device.c
45267+++ b/drivers/media/v4l2-core/v4l2-device.c
45268@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
45269 EXPORT_SYMBOL_GPL(v4l2_device_put);
45270
45271 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
45272- atomic_t *instance)
45273+ atomic_unchecked_t *instance)
45274 {
45275- int num = atomic_inc_return(instance) - 1;
45276+ int num = atomic_inc_return_unchecked(instance) - 1;
45277 int len = strlen(basename);
45278
45279 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
45280diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
45281index 707aef7..93b8ac0 100644
45282--- a/drivers/media/v4l2-core/v4l2-ioctl.c
45283+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
45284@@ -1942,7 +1942,8 @@ struct v4l2_ioctl_info {
45285 struct file *file, void *fh, void *p);
45286 } u;
45287 void (*debug)(const void *arg, bool write_only);
45288-};
45289+} __do_const;
45290+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
45291
45292 /* This control needs a priority check */
45293 #define INFO_FL_PRIO (1 << 0)
45294@@ -2123,7 +2124,7 @@ static long __video_do_ioctl(struct file *file,
45295 struct video_device *vfd = video_devdata(file);
45296 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
45297 bool write_only = false;
45298- struct v4l2_ioctl_info default_info;
45299+ v4l2_ioctl_info_no_const default_info;
45300 const struct v4l2_ioctl_info *info;
45301 void *fh = file->private_data;
45302 struct v4l2_fh *vfh = NULL;
45303@@ -2197,7 +2198,7 @@ done:
45304 }
45305
45306 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45307- void * __user *user_ptr, void ***kernel_ptr)
45308+ void __user **user_ptr, void ***kernel_ptr)
45309 {
45310 int ret = 0;
45311
45312@@ -2213,7 +2214,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45313 ret = -EINVAL;
45314 break;
45315 }
45316- *user_ptr = (void __user *)buf->m.planes;
45317+ *user_ptr = (void __force_user *)buf->m.planes;
45318 *kernel_ptr = (void *)&buf->m.planes;
45319 *array_size = sizeof(struct v4l2_plane) * buf->length;
45320 ret = 1;
45321@@ -2248,7 +2249,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
45322 ret = -EINVAL;
45323 break;
45324 }
45325- *user_ptr = (void __user *)ctrls->controls;
45326+ *user_ptr = (void __force_user *)ctrls->controls;
45327 *kernel_ptr = (void *)&ctrls->controls;
45328 *array_size = sizeof(struct v4l2_ext_control)
45329 * ctrls->count;
45330@@ -2349,7 +2350,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
45331 }
45332
45333 if (has_array_args) {
45334- *kernel_ptr = user_ptr;
45335+ *kernel_ptr = (void __force_kernel *)user_ptr;
45336 if (copy_to_user(user_ptr, mbuf, array_size))
45337 err = -EFAULT;
45338 goto out_array_args;
45339diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
45340index 570b18a..f880314 100644
45341--- a/drivers/message/fusion/mptbase.c
45342+++ b/drivers/message/fusion/mptbase.c
45343@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
45344 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
45345 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
45346
45347+#ifdef CONFIG_GRKERNSEC_HIDESYM
45348+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
45349+#else
45350 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
45351 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
45352+#endif
45353+
45354 /*
45355 * Rounding UP to nearest 4-kB boundary here...
45356 */
45357@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
45358 ioc->facts.GlobalCredits);
45359
45360 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
45361+#ifdef CONFIG_GRKERNSEC_HIDESYM
45362+ NULL, NULL);
45363+#else
45364 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
45365+#endif
45366 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
45367 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
45368 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
45369diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
45370index 00d339c..2ea899d 100644
45371--- a/drivers/message/fusion/mptsas.c
45372+++ b/drivers/message/fusion/mptsas.c
45373@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
45374 return 0;
45375 }
45376
45377+static inline void
45378+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
45379+{
45380+ if (phy_info->port_details) {
45381+ phy_info->port_details->rphy = rphy;
45382+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
45383+ ioc->name, rphy));
45384+ }
45385+
45386+ if (rphy) {
45387+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
45388+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
45389+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
45390+ ioc->name, rphy, rphy->dev.release));
45391+ }
45392+}
45393+
45394 /* no mutex */
45395 static void
45396 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
45397@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
45398 return NULL;
45399 }
45400
45401-static inline void
45402-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
45403-{
45404- if (phy_info->port_details) {
45405- phy_info->port_details->rphy = rphy;
45406- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
45407- ioc->name, rphy));
45408- }
45409-
45410- if (rphy) {
45411- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
45412- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
45413- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
45414- ioc->name, rphy, rphy->dev.release));
45415- }
45416-}
45417-
45418 static inline struct sas_port *
45419 mptsas_get_port(struct mptsas_phyinfo *phy_info)
45420 {
45421diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
45422index 727819c..ad74694 100644
45423--- a/drivers/message/fusion/mptscsih.c
45424+++ b/drivers/message/fusion/mptscsih.c
45425@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
45426
45427 h = shost_priv(SChost);
45428
45429- if (h) {
45430- if (h->info_kbuf == NULL)
45431- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
45432- return h->info_kbuf;
45433- h->info_kbuf[0] = '\0';
45434+ if (!h)
45435+ return NULL;
45436
45437- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
45438- h->info_kbuf[size-1] = '\0';
45439- }
45440+ if (h->info_kbuf == NULL)
45441+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
45442+ return h->info_kbuf;
45443+ h->info_kbuf[0] = '\0';
45444+
45445+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
45446+ h->info_kbuf[size-1] = '\0';
45447
45448 return h->info_kbuf;
45449 }
45450diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
45451index b7d87cd..3fb36da 100644
45452--- a/drivers/message/i2o/i2o_proc.c
45453+++ b/drivers/message/i2o/i2o_proc.c
45454@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
45455 "Array Controller Device"
45456 };
45457
45458-static char *chtostr(char *tmp, u8 *chars, int n)
45459-{
45460- tmp[0] = 0;
45461- return strncat(tmp, (char *)chars, n);
45462-}
45463-
45464 static int i2o_report_query_status(struct seq_file *seq, int block_status,
45465 char *group)
45466 {
45467@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
45468 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
45469 {
45470 struct i2o_controller *c = (struct i2o_controller *)seq->private;
45471- static u32 work32[5];
45472- static u8 *work8 = (u8 *) work32;
45473- static u16 *work16 = (u16 *) work32;
45474+ u32 work32[5];
45475+ u8 *work8 = (u8 *) work32;
45476+ u16 *work16 = (u16 *) work32;
45477 int token;
45478 u32 hwcap;
45479
45480@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
45481 } *result;
45482
45483 i2o_exec_execute_ddm_table ddm_table;
45484- char tmp[28 + 1];
45485
45486 result = kmalloc(sizeof(*result), GFP_KERNEL);
45487 if (!result)
45488@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
45489
45490 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
45491 seq_printf(seq, "%-#8x", ddm_table.module_id);
45492- seq_printf(seq, "%-29s",
45493- chtostr(tmp, ddm_table.module_name_version, 28));
45494+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
45495 seq_printf(seq, "%9d ", ddm_table.data_size);
45496 seq_printf(seq, "%8d", ddm_table.code_size);
45497
45498@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
45499
45500 i2o_driver_result_table *result;
45501 i2o_driver_store_table *dst;
45502- char tmp[28 + 1];
45503
45504 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
45505 if (result == NULL)
45506@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
45507
45508 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
45509 seq_printf(seq, "%-#8x", dst->module_id);
45510- seq_printf(seq, "%-29s",
45511- chtostr(tmp, dst->module_name_version, 28));
45512- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
45513+ seq_printf(seq, "%-.28s", dst->module_name_version);
45514+ seq_printf(seq, "%-.8s", dst->date);
45515 seq_printf(seq, "%8d ", dst->module_size);
45516 seq_printf(seq, "%8d ", dst->mpb_size);
45517 seq_printf(seq, "0x%04x", dst->module_flags);
45518@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
45519 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
45520 {
45521 struct i2o_device *d = (struct i2o_device *)seq->private;
45522- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
45523+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
45524 // == (allow) 512d bytes (max)
45525- static u16 *work16 = (u16 *) work32;
45526+ u16 *work16 = (u16 *) work32;
45527 int token;
45528- char tmp[16 + 1];
45529
45530 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
45531
45532@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
45533 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
45534 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
45535 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
45536- seq_printf(seq, "Vendor info : %s\n",
45537- chtostr(tmp, (u8 *) (work32 + 2), 16));
45538- seq_printf(seq, "Product info : %s\n",
45539- chtostr(tmp, (u8 *) (work32 + 6), 16));
45540- seq_printf(seq, "Description : %s\n",
45541- chtostr(tmp, (u8 *) (work32 + 10), 16));
45542- seq_printf(seq, "Product rev. : %s\n",
45543- chtostr(tmp, (u8 *) (work32 + 14), 8));
45544+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
45545+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
45546+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
45547+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
45548
45549 seq_printf(seq, "Serial number : ");
45550 print_serial_number(seq, (u8 *) (work32 + 16),
45551@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
45552 u8 pad[256]; // allow up to 256 byte (max) serial number
45553 } result;
45554
45555- char tmp[24 + 1];
45556-
45557 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
45558
45559 if (token < 0) {
45560@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
45561 }
45562
45563 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
45564- seq_printf(seq, "Module name : %s\n",
45565- chtostr(tmp, result.module_name, 24));
45566- seq_printf(seq, "Module revision : %s\n",
45567- chtostr(tmp, result.module_rev, 8));
45568+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
45569+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
45570
45571 seq_printf(seq, "Serial number : ");
45572 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
45573@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
45574 u8 instance_number[4];
45575 } result;
45576
45577- char tmp[64 + 1];
45578-
45579 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
45580
45581 if (token < 0) {
45582@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
45583 return 0;
45584 }
45585
45586- seq_printf(seq, "Device name : %s\n",
45587- chtostr(tmp, result.device_name, 64));
45588- seq_printf(seq, "Service name : %s\n",
45589- chtostr(tmp, result.service_name, 64));
45590- seq_printf(seq, "Physical name : %s\n",
45591- chtostr(tmp, result.physical_location, 64));
45592- seq_printf(seq, "Instance number : %s\n",
45593- chtostr(tmp, result.instance_number, 4));
45594+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
45595+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
45596+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
45597+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
45598
45599 return 0;
45600 }
45601@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
45602 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
45603 {
45604 struct i2o_device *d = (struct i2o_device *)seq->private;
45605- static u32 work32[12];
45606- static u16 *work16 = (u16 *) work32;
45607- static u8 *work8 = (u8 *) work32;
45608+ u32 work32[12];
45609+ u16 *work16 = (u16 *) work32;
45610+ u8 *work8 = (u8 *) work32;
45611 int token;
45612
45613 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
45614diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
45615index a8c08f3..155fe3d 100644
45616--- a/drivers/message/i2o/iop.c
45617+++ b/drivers/message/i2o/iop.c
45618@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
45619
45620 spin_lock_irqsave(&c->context_list_lock, flags);
45621
45622- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
45623- atomic_inc(&c->context_list_counter);
45624+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
45625+ atomic_inc_unchecked(&c->context_list_counter);
45626
45627- entry->context = atomic_read(&c->context_list_counter);
45628+ entry->context = atomic_read_unchecked(&c->context_list_counter);
45629
45630 list_add(&entry->list, &c->context_list);
45631
45632@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
45633
45634 #if BITS_PER_LONG == 64
45635 spin_lock_init(&c->context_list_lock);
45636- atomic_set(&c->context_list_counter, 0);
45637+ atomic_set_unchecked(&c->context_list_counter, 0);
45638 INIT_LIST_HEAD(&c->context_list);
45639 #endif
45640
45641diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
45642index d1a22aa..d0f7bf7 100644
45643--- a/drivers/mfd/ab8500-debugfs.c
45644+++ b/drivers/mfd/ab8500-debugfs.c
45645@@ -100,7 +100,7 @@ static int irq_last;
45646 static u32 *irq_count;
45647 static int num_irqs;
45648
45649-static struct device_attribute **dev_attr;
45650+static device_attribute_no_const **dev_attr;
45651 static char **event_name;
45652
45653 static u8 avg_sample = SAMPLE_16;
45654diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
45655index a83eed5..62a58a9 100644
45656--- a/drivers/mfd/max8925-i2c.c
45657+++ b/drivers/mfd/max8925-i2c.c
45658@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
45659 const struct i2c_device_id *id)
45660 {
45661 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
45662- static struct max8925_chip *chip;
45663+ struct max8925_chip *chip;
45664 struct device_node *node = client->dev.of_node;
45665
45666 if (node && !pdata) {
45667diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
45668index d657331..0d9a80f 100644
45669--- a/drivers/mfd/tps65910.c
45670+++ b/drivers/mfd/tps65910.c
45671@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
45672 struct tps65910_platform_data *pdata)
45673 {
45674 int ret = 0;
45675- static struct regmap_irq_chip *tps6591x_irqs_chip;
45676+ struct regmap_irq_chip *tps6591x_irqs_chip;
45677
45678 if (!irq) {
45679 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
45680diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
45681index 9aa6d1e..1631bfc 100644
45682--- a/drivers/mfd/twl4030-irq.c
45683+++ b/drivers/mfd/twl4030-irq.c
45684@@ -35,6 +35,7 @@
45685 #include <linux/of.h>
45686 #include <linux/irqdomain.h>
45687 #include <linux/i2c/twl.h>
45688+#include <asm/pgtable.h>
45689
45690 #include "twl-core.h"
45691
45692@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
45693 * Install an irq handler for each of the SIH modules;
45694 * clone dummy irq_chip since PIH can't *do* anything
45695 */
45696- twl4030_irq_chip = dummy_irq_chip;
45697- twl4030_irq_chip.name = "twl4030";
45698+ pax_open_kernel();
45699+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
45700+ *(const char **)&twl4030_irq_chip.name = "twl4030";
45701
45702- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45703+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45704+ pax_close_kernel();
45705
45706 for (i = irq_base; i < irq_end; i++) {
45707 irq_set_chip_and_handler(i, &twl4030_irq_chip,
45708diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
45709index 464419b..64bae8d 100644
45710--- a/drivers/misc/c2port/core.c
45711+++ b/drivers/misc/c2port/core.c
45712@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
45713 goto error_idr_alloc;
45714 c2dev->id = ret;
45715
45716- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45717+ pax_open_kernel();
45718+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45719+ pax_close_kernel();
45720
45721 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
45722 "c2port%d", c2dev->id);
45723diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
45724index 9c34e57..b981cda 100644
45725--- a/drivers/misc/eeprom/sunxi_sid.c
45726+++ b/drivers/misc/eeprom/sunxi_sid.c
45727@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
45728
45729 platform_set_drvdata(pdev, sid_data);
45730
45731- sid_bin_attr.size = sid_data->keysize;
45732+ pax_open_kernel();
45733+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
45734+ pax_close_kernel();
45735 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
45736 return -ENODEV;
45737
45738diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
45739index 36f5d52..32311c3 100644
45740--- a/drivers/misc/kgdbts.c
45741+++ b/drivers/misc/kgdbts.c
45742@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
45743 char before[BREAK_INSTR_SIZE];
45744 char after[BREAK_INSTR_SIZE];
45745
45746- probe_kernel_read(before, (char *)kgdbts_break_test,
45747+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
45748 BREAK_INSTR_SIZE);
45749 init_simple_test();
45750 ts.tst = plant_and_detach_test;
45751@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
45752 /* Activate test with initial breakpoint */
45753 if (!is_early)
45754 kgdb_breakpoint();
45755- probe_kernel_read(after, (char *)kgdbts_break_test,
45756+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
45757 BREAK_INSTR_SIZE);
45758 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
45759 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
45760diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
45761index 036effe..b3a6336 100644
45762--- a/drivers/misc/lis3lv02d/lis3lv02d.c
45763+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
45764@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
45765 * the lid is closed. This leads to interrupts as soon as a little move
45766 * is done.
45767 */
45768- atomic_inc(&lis3->count);
45769+ atomic_inc_unchecked(&lis3->count);
45770
45771 wake_up_interruptible(&lis3->misc_wait);
45772 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
45773@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
45774 if (lis3->pm_dev)
45775 pm_runtime_get_sync(lis3->pm_dev);
45776
45777- atomic_set(&lis3->count, 0);
45778+ atomic_set_unchecked(&lis3->count, 0);
45779 return 0;
45780 }
45781
45782@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
45783 add_wait_queue(&lis3->misc_wait, &wait);
45784 while (true) {
45785 set_current_state(TASK_INTERRUPTIBLE);
45786- data = atomic_xchg(&lis3->count, 0);
45787+ data = atomic_xchg_unchecked(&lis3->count, 0);
45788 if (data)
45789 break;
45790
45791@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
45792 struct lis3lv02d, miscdev);
45793
45794 poll_wait(file, &lis3->misc_wait, wait);
45795- if (atomic_read(&lis3->count))
45796+ if (atomic_read_unchecked(&lis3->count))
45797 return POLLIN | POLLRDNORM;
45798 return 0;
45799 }
45800diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
45801index c439c82..1f20f57 100644
45802--- a/drivers/misc/lis3lv02d/lis3lv02d.h
45803+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
45804@@ -297,7 +297,7 @@ struct lis3lv02d {
45805 struct input_polled_dev *idev; /* input device */
45806 struct platform_device *pdev; /* platform device */
45807 struct regulator_bulk_data regulators[2];
45808- atomic_t count; /* interrupt count after last read */
45809+ atomic_unchecked_t count; /* interrupt count after last read */
45810 union axis_conversion ac; /* hw -> logical axis */
45811 int mapped_btns[3];
45812
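
The atomic_t -> atomic_unchecked_t conversions here (and in the sgi-gru statistics that follow) are the flip side of PaX's REFCOUNT hardening: once plain atomic_t operations detect overflow as a refcounting bug, counters whose wraparound is harmless — an interrupt tally like lis3->count — must opt out via the *_unchecked variants. A userspace model of the two flavors, illustrative rather than PaX's implementation:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { volatile int counter; } atomic_t;
    typedef struct { volatile int counter; } atomic_unchecked_t;

    static void atomic_inc(atomic_t *v)
    {
        /* checked flavor: treat hitting INT_MAX as a refcount bug */
        int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
        if (old == INT_MAX)
            __builtin_trap();
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* unchecked flavor: wraparound is acceptable for this counter */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        atomic_t refs = { 0 };
        atomic_unchecked_t events = { INT_MAX };

        atomic_inc(&refs);                 /* fine: 0 -> 1 */
        atomic_inc_unchecked(&events);     /* wraps to INT_MIN, by design */
        printf("refs=%d events=%d\n", refs.counter, events.counter);
        return 0;
    }
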
45813diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
45814index 2f30bad..c4c13d0 100644
45815--- a/drivers/misc/sgi-gru/gruhandles.c
45816+++ b/drivers/misc/sgi-gru/gruhandles.c
45817@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
45818 unsigned long nsec;
45819
45820 nsec = CLKS2NSEC(clks);
45821- atomic_long_inc(&mcs_op_statistics[op].count);
45822- atomic_long_add(nsec, &mcs_op_statistics[op].total);
45823+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
45824+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
45825 if (mcs_op_statistics[op].max < nsec)
45826 mcs_op_statistics[op].max = nsec;
45827 }
45828diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
45829index 4f76359..cdfcb2e 100644
45830--- a/drivers/misc/sgi-gru/gruprocfs.c
45831+++ b/drivers/misc/sgi-gru/gruprocfs.c
45832@@ -32,9 +32,9 @@
45833
45834 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
45835
45836-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
45837+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
45838 {
45839- unsigned long val = atomic_long_read(v);
45840+ unsigned long val = atomic_long_read_unchecked(v);
45841
45842 seq_printf(s, "%16lu %s\n", val, id);
45843 }
45844@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
45845
45846 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
45847 for (op = 0; op < mcsop_last; op++) {
45848- count = atomic_long_read(&mcs_op_statistics[op].count);
45849- total = atomic_long_read(&mcs_op_statistics[op].total);
45850+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
45851+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
45852 max = mcs_op_statistics[op].max;
45853 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
45854 count ? total / count : 0, max);
45855diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
45856index 5c3ce24..4915ccb 100644
45857--- a/drivers/misc/sgi-gru/grutables.h
45858+++ b/drivers/misc/sgi-gru/grutables.h
45859@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
45860 * GRU statistics.
45861 */
45862 struct gru_stats_s {
45863- atomic_long_t vdata_alloc;
45864- atomic_long_t vdata_free;
45865- atomic_long_t gts_alloc;
45866- atomic_long_t gts_free;
45867- atomic_long_t gms_alloc;
45868- atomic_long_t gms_free;
45869- atomic_long_t gts_double_allocate;
45870- atomic_long_t assign_context;
45871- atomic_long_t assign_context_failed;
45872- atomic_long_t free_context;
45873- atomic_long_t load_user_context;
45874- atomic_long_t load_kernel_context;
45875- atomic_long_t lock_kernel_context;
45876- atomic_long_t unlock_kernel_context;
45877- atomic_long_t steal_user_context;
45878- atomic_long_t steal_kernel_context;
45879- atomic_long_t steal_context_failed;
45880- atomic_long_t nopfn;
45881- atomic_long_t asid_new;
45882- atomic_long_t asid_next;
45883- atomic_long_t asid_wrap;
45884- atomic_long_t asid_reuse;
45885- atomic_long_t intr;
45886- atomic_long_t intr_cbr;
45887- atomic_long_t intr_tfh;
45888- atomic_long_t intr_spurious;
45889- atomic_long_t intr_mm_lock_failed;
45890- atomic_long_t call_os;
45891- atomic_long_t call_os_wait_queue;
45892- atomic_long_t user_flush_tlb;
45893- atomic_long_t user_unload_context;
45894- atomic_long_t user_exception;
45895- atomic_long_t set_context_option;
45896- atomic_long_t check_context_retarget_intr;
45897- atomic_long_t check_context_unload;
45898- atomic_long_t tlb_dropin;
45899- atomic_long_t tlb_preload_page;
45900- atomic_long_t tlb_dropin_fail_no_asid;
45901- atomic_long_t tlb_dropin_fail_upm;
45902- atomic_long_t tlb_dropin_fail_invalid;
45903- atomic_long_t tlb_dropin_fail_range_active;
45904- atomic_long_t tlb_dropin_fail_idle;
45905- atomic_long_t tlb_dropin_fail_fmm;
45906- atomic_long_t tlb_dropin_fail_no_exception;
45907- atomic_long_t tfh_stale_on_fault;
45908- atomic_long_t mmu_invalidate_range;
45909- atomic_long_t mmu_invalidate_page;
45910- atomic_long_t flush_tlb;
45911- atomic_long_t flush_tlb_gru;
45912- atomic_long_t flush_tlb_gru_tgh;
45913- atomic_long_t flush_tlb_gru_zero_asid;
45914+ atomic_long_unchecked_t vdata_alloc;
45915+ atomic_long_unchecked_t vdata_free;
45916+ atomic_long_unchecked_t gts_alloc;
45917+ atomic_long_unchecked_t gts_free;
45918+ atomic_long_unchecked_t gms_alloc;
45919+ atomic_long_unchecked_t gms_free;
45920+ atomic_long_unchecked_t gts_double_allocate;
45921+ atomic_long_unchecked_t assign_context;
45922+ atomic_long_unchecked_t assign_context_failed;
45923+ atomic_long_unchecked_t free_context;
45924+ atomic_long_unchecked_t load_user_context;
45925+ atomic_long_unchecked_t load_kernel_context;
45926+ atomic_long_unchecked_t lock_kernel_context;
45927+ atomic_long_unchecked_t unlock_kernel_context;
45928+ atomic_long_unchecked_t steal_user_context;
45929+ atomic_long_unchecked_t steal_kernel_context;
45930+ atomic_long_unchecked_t steal_context_failed;
45931+ atomic_long_unchecked_t nopfn;
45932+ atomic_long_unchecked_t asid_new;
45933+ atomic_long_unchecked_t asid_next;
45934+ atomic_long_unchecked_t asid_wrap;
45935+ atomic_long_unchecked_t asid_reuse;
45936+ atomic_long_unchecked_t intr;
45937+ atomic_long_unchecked_t intr_cbr;
45938+ atomic_long_unchecked_t intr_tfh;
45939+ atomic_long_unchecked_t intr_spurious;
45940+ atomic_long_unchecked_t intr_mm_lock_failed;
45941+ atomic_long_unchecked_t call_os;
45942+ atomic_long_unchecked_t call_os_wait_queue;
45943+ atomic_long_unchecked_t user_flush_tlb;
45944+ atomic_long_unchecked_t user_unload_context;
45945+ atomic_long_unchecked_t user_exception;
45946+ atomic_long_unchecked_t set_context_option;
45947+ atomic_long_unchecked_t check_context_retarget_intr;
45948+ atomic_long_unchecked_t check_context_unload;
45949+ atomic_long_unchecked_t tlb_dropin;
45950+ atomic_long_unchecked_t tlb_preload_page;
45951+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
45952+ atomic_long_unchecked_t tlb_dropin_fail_upm;
45953+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
45954+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
45955+ atomic_long_unchecked_t tlb_dropin_fail_idle;
45956+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
45957+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
45958+ atomic_long_unchecked_t tfh_stale_on_fault;
45959+ atomic_long_unchecked_t mmu_invalidate_range;
45960+ atomic_long_unchecked_t mmu_invalidate_page;
45961+ atomic_long_unchecked_t flush_tlb;
45962+ atomic_long_unchecked_t flush_tlb_gru;
45963+ atomic_long_unchecked_t flush_tlb_gru_tgh;
45964+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
45965
45966- atomic_long_t copy_gpa;
45967- atomic_long_t read_gpa;
45968+ atomic_long_unchecked_t copy_gpa;
45969+ atomic_long_unchecked_t read_gpa;
45970
45971- atomic_long_t mesq_receive;
45972- atomic_long_t mesq_receive_none;
45973- atomic_long_t mesq_send;
45974- atomic_long_t mesq_send_failed;
45975- atomic_long_t mesq_noop;
45976- atomic_long_t mesq_send_unexpected_error;
45977- atomic_long_t mesq_send_lb_overflow;
45978- atomic_long_t mesq_send_qlimit_reached;
45979- atomic_long_t mesq_send_amo_nacked;
45980- atomic_long_t mesq_send_put_nacked;
45981- atomic_long_t mesq_page_overflow;
45982- atomic_long_t mesq_qf_locked;
45983- atomic_long_t mesq_qf_noop_not_full;
45984- atomic_long_t mesq_qf_switch_head_failed;
45985- atomic_long_t mesq_qf_unexpected_error;
45986- atomic_long_t mesq_noop_unexpected_error;
45987- atomic_long_t mesq_noop_lb_overflow;
45988- atomic_long_t mesq_noop_qlimit_reached;
45989- atomic_long_t mesq_noop_amo_nacked;
45990- atomic_long_t mesq_noop_put_nacked;
45991- atomic_long_t mesq_noop_page_overflow;
45992+ atomic_long_unchecked_t mesq_receive;
45993+ atomic_long_unchecked_t mesq_receive_none;
45994+ atomic_long_unchecked_t mesq_send;
45995+ atomic_long_unchecked_t mesq_send_failed;
45996+ atomic_long_unchecked_t mesq_noop;
45997+ atomic_long_unchecked_t mesq_send_unexpected_error;
45998+ atomic_long_unchecked_t mesq_send_lb_overflow;
45999+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46000+ atomic_long_unchecked_t mesq_send_amo_nacked;
46001+ atomic_long_unchecked_t mesq_send_put_nacked;
46002+ atomic_long_unchecked_t mesq_page_overflow;
46003+ atomic_long_unchecked_t mesq_qf_locked;
46004+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46005+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46006+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46007+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46008+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46009+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46010+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46011+ atomic_long_unchecked_t mesq_noop_put_nacked;
46012+ atomic_long_unchecked_t mesq_noop_page_overflow;
46013
46014 };
46015
46016@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46017 tghop_invalidate, mcsop_last};
46018
46019 struct mcs_op_statistic {
46020- atomic_long_t count;
46021- atomic_long_t total;
46022+ atomic_long_unchecked_t count;
46023+ atomic_long_unchecked_t total;
46024 unsigned long max;
46025 };
46026
46027@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46028
46029 #define STAT(id) do { \
46030 if (gru_options & OPT_STATS) \
46031- atomic_long_inc(&gru_stats.id); \
46032+ atomic_long_inc_unchecked(&gru_stats.id); \
46033 } while (0)
46034
46035 #ifdef CONFIG_SGI_GRU_DEBUG
46036diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46037index c862cd4..0d176fe 100644
46038--- a/drivers/misc/sgi-xp/xp.h
46039+++ b/drivers/misc/sgi-xp/xp.h
46040@@ -288,7 +288,7 @@ struct xpc_interface {
46041 xpc_notify_func, void *);
46042 void (*received) (short, int, void *);
46043 enum xp_retval (*partid_to_nasids) (short, void *);
46044-};
46045+} __no_const;
46046
46047 extern struct xpc_interface xpc_interface;
46048
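
__no_const is the escape hatch from the same constify plugin: a struct made up entirely of function pointers would normally be forced const, but xpc_interface is deliberately rebound at runtime as the xpc module comes and goes, so it must stay writable. A sketch of the opt-out, assuming the attribute name the plugin recognizes:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))   /* plugin-defined attribute */
    #else
    #define __no_const
    #endif

    struct hot_swappable_ops {
        void (*connect)(int);
        void (*disconnect)(int);
    } __no_const;           /* stays writable despite holding only fn ptrs */
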
46049diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46050index 01be66d..e3a0c7e 100644
46051--- a/drivers/misc/sgi-xp/xp_main.c
46052+++ b/drivers/misc/sgi-xp/xp_main.c
46053@@ -78,13 +78,13 @@ xpc_notloaded(void)
46054 }
46055
46056 struct xpc_interface xpc_interface = {
46057- (void (*)(int))xpc_notloaded,
46058- (void (*)(int))xpc_notloaded,
46059- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46060- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46061+ .connect = (void (*)(int))xpc_notloaded,
46062+ .disconnect = (void (*)(int))xpc_notloaded,
46063+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46064+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46065 void *))xpc_notloaded,
46066- (void (*)(short, int, void *))xpc_notloaded,
46067- (enum xp_retval(*)(short, void *))xpc_notloaded
46068+ .received = (void (*)(short, int, void *))xpc_notloaded,
46069+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46070 };
46071 EXPORT_SYMBOL_GPL(xpc_interface);
46072
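
Rewriting the xpc_interface initializer with designated initializers binds each stub to a member by name instead of by declaration order — the same treatment the bna_ioceth_cbfn, lmc_media and z8530 tables get later in this patch. That matters once structure layout can change (or be randomized): a positional initializer silently misassigns, a designated one does not. A runnable comparison:

    #include <stdio.h>

    struct ops {
        void (*connect)(int);
        void (*disconnect)(int);
    };

    static void stub(int ch) { printf("stub(%d)\n", ch); }

    static struct ops positional = { stub, stub };   /* depends on member order */
    static struct ops designated = {
        .connect    = stub,                          /* order-independent */
        .disconnect = stub,
    };

    int main(void)
    {
        positional.connect(1);
        designated.disconnect(2);
        return 0;
    }
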
46073diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46074index b94d5f7..7f494c5 100644
46075--- a/drivers/misc/sgi-xp/xpc.h
46076+++ b/drivers/misc/sgi-xp/xpc.h
46077@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46078 void (*received_payload) (struct xpc_channel *, void *);
46079 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46080 };
46081+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46082
46083 /* struct xpc_partition act_state values (for XPC HB) */
46084
46085@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46086 /* found in xpc_main.c */
46087 extern struct device *xpc_part;
46088 extern struct device *xpc_chan;
46089-extern struct xpc_arch_operations xpc_arch_ops;
46090+extern xpc_arch_operations_no_const xpc_arch_ops;
46091 extern int xpc_disengage_timelimit;
46092 extern int xpc_disengage_timedout;
46093 extern int xpc_activate_IRQ_rcvd;
46094diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46095index 82dc574..8539ab2 100644
46096--- a/drivers/misc/sgi-xp/xpc_main.c
46097+++ b/drivers/misc/sgi-xp/xpc_main.c
46098@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46099 .notifier_call = xpc_system_die,
46100 };
46101
46102-struct xpc_arch_operations xpc_arch_ops;
46103+xpc_arch_operations_no_const xpc_arch_ops;
46104
46105 /*
46106 * Timer function to enforce the timelimit on the partition disengage.
46107@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46108
46109 if (((die_args->trapnr == X86_TRAP_MF) ||
46110 (die_args->trapnr == X86_TRAP_XF)) &&
46111- !user_mode_vm(die_args->regs))
46112+ !user_mode(die_args->regs))
46113 xpc_die_deactivate();
46114
46115 break;
46116diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46117index 7b5424f..ed1d6ac 100644
46118--- a/drivers/mmc/card/block.c
46119+++ b/drivers/mmc/card/block.c
46120@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46121 if (idata->ic.postsleep_min_us)
46122 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46123
46124- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46125+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46126 err = -EFAULT;
46127 goto cmd_rel_host;
46128 }
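
Dropping the & in copy_to_user(&(ic_ptr->response), ...) is a type cleanup, not a behavior change: response is an array member, so &ic_ptr->response has type u32 (*)[4] while ic_ptr->response decays to u32 * — the same address either way, and copy_to_user() takes void __user * regardless. The same & removal shows up again in macvtap_ioctl() below. Demonstrably:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mmc_ioc_cmd_like { uint32_t response[4]; };

    int main(void)
    {
        struct mmc_ioc_cmd_like ic;
        uint32_t (*whole_array)[4] = &ic.response;   /* pointer to the array */
        uint32_t *first_elem = ic.response;          /* pointer to element 0 */

        assert((void *)whole_array == (void *)first_elem);
        printf("same address, pointee sizes: %zu vs %zu\n",
               sizeof *whole_array, sizeof *first_elem);
        return 0;
    }
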
46129diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
46130index e5b5eeb..7bf2212 100644
46131--- a/drivers/mmc/core/mmc_ops.c
46132+++ b/drivers/mmc/core/mmc_ops.c
46133@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
46134 void *data_buf;
46135 int is_on_stack;
46136
46137- is_on_stack = object_is_on_stack(buf);
46138+ is_on_stack = object_starts_on_stack(buf);
46139 if (is_on_stack) {
46140 /*
46141 * dma onto stack is unsafe/nonportable, but callers to this
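
The rename object_is_on_stack() -> object_starts_on_stack() (applied again in gpmi-nand below) makes the predicate's limit explicit: it can only show that the object's first byte lies within the current task's stack, not that the whole buffer does, so callers deciding whether DMA-from-stack is in play must not read it as a whole-object guarantee. A sketch of the check under an assumed fixed stack size:

    #include <stdbool.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL    /* illustrative; arch-dependent in reality */

    static bool object_starts_on_stack(uintptr_t stack_base, const void *obj)
    {
        uintptr_t p = (uintptr_t)obj;
        return p >= stack_base && p < stack_base + THREAD_SIZE; /* start only */
    }

    int main(void)
    {
        int local;
        /* crude base guess for the demo: round down to THREAD_SIZE */
        uintptr_t base = (uintptr_t)&local & ~(THREAD_SIZE - 1);
        return object_starts_on_stack(base, &local) ? 0 : 1;
    }
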
46142diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46143index 6bf24ab..13d0293b 100644
46144--- a/drivers/mmc/host/dw_mmc.h
46145+++ b/drivers/mmc/host/dw_mmc.h
46146@@ -258,5 +258,5 @@ struct dw_mci_drv_data {
46147 int (*parse_dt)(struct dw_mci *host);
46148 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46149 struct dw_mci_tuning_data *tuning_data);
46150-};
46151+} __do_const;
46152 #endif /* _DW_MMC_H_ */
46153diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46154index b931226..df6a085 100644
46155--- a/drivers/mmc/host/mmci.c
46156+++ b/drivers/mmc/host/mmci.c
46157@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev,
46158 }
46159
46160 if (variant->busy_detect) {
46161- mmci_ops.card_busy = mmci_card_busy;
46162+ pax_open_kernel();
46163+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46164+ pax_close_kernel();
46165 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46166 }
46167
46168diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46169index b841bb7..d82712f5 100644
46170--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46171+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46172@@ -1031,9 +1031,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46173 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46174 }
46175
46176- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46177- sdhci_esdhc_ops.platform_execute_tuning =
46178+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46179+ pax_open_kernel();
46180+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
46181 esdhc_executing_tuning;
46182+ pax_close_kernel();
46183+ }
46184
46185 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
46186 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
46187diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
46188index 6debda9..2ba7427 100644
46189--- a/drivers/mmc/host/sdhci-s3c.c
46190+++ b/drivers/mmc/host/sdhci-s3c.c
46191@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
46192 * we can use overriding functions instead of default.
46193 */
46194 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
46195- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46196- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46197- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46198+ pax_open_kernel();
46199+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
46200+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
46201+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
46202+ pax_close_kernel();
46203 }
46204
46205 /* It supports additional host capabilities if needed */
46206diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
46207index 096993f..f02c23b 100644
46208--- a/drivers/mtd/chips/cfi_cmdset_0020.c
46209+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
46210@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
46211 size_t totlen = 0, thislen;
46212 int ret = 0;
46213 size_t buflen = 0;
46214- static char *buffer;
46215+ char *buffer;
46216
46217 if (!ECCBUF_SIZE) {
46218 /* We should fall back to a general writev implementation.
46219diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
46220index c07cd57..61c4fbd 100644
46221--- a/drivers/mtd/nand/denali.c
46222+++ b/drivers/mtd/nand/denali.c
46223@@ -24,6 +24,7 @@
46224 #include <linux/slab.h>
46225 #include <linux/mtd/mtd.h>
46226 #include <linux/module.h>
46227+#include <linux/slab.h>
46228
46229 #include "denali.h"
46230
46231diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46232index ca6369f..0ce9fed 100644
46233--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46234+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
46235@@ -369,7 +369,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
46236
46237 /* first try to map the upper buffer directly */
46238 if (virt_addr_valid(this->upper_buf) &&
46239- !object_is_on_stack(this->upper_buf)) {
46240+ !object_starts_on_stack(this->upper_buf)) {
46241 sg_init_one(sgl, this->upper_buf, this->upper_len);
46242 ret = dma_map_sg(this->dev, sgl, 1, dr);
46243 if (ret == 0)
46244diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
46245index 51b9d6a..52af9a7 100644
46246--- a/drivers/mtd/nftlmount.c
46247+++ b/drivers/mtd/nftlmount.c
46248@@ -24,6 +24,7 @@
46249 #include <asm/errno.h>
46250 #include <linux/delay.h>
46251 #include <linux/slab.h>
46252+#include <linux/sched.h>
46253 #include <linux/mtd/mtd.h>
46254 #include <linux/mtd/nand.h>
46255 #include <linux/mtd/nftl.h>
46256diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
46257index cf49c22..971b133 100644
46258--- a/drivers/mtd/sm_ftl.c
46259+++ b/drivers/mtd/sm_ftl.c
46260@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46261 #define SM_CIS_VENDOR_OFFSET 0x59
46262 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
46263 {
46264- struct attribute_group *attr_group;
46265+ attribute_group_no_const *attr_group;
46266 struct attribute **attributes;
46267 struct sm_sysfs_attribute *vendor_attribute;
46268 char *vendor;
46269diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
46270index 91ec8cd..562ff5f 100644
46271--- a/drivers/net/bonding/bond_main.c
46272+++ b/drivers/net/bonding/bond_main.c
46273@@ -4552,6 +4552,7 @@ static void __exit bonding_exit(void)
46274
46275 bond_netlink_fini();
46276 unregister_pernet_subsys(&bond_net_ops);
46277+ rtnl_link_unregister(&bond_link_ops);
46278
46279 #ifdef CONFIG_NET_POLL_CONTROLLER
46280 /*
46281diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
46282index 70651f8..7eb1bdf 100644
46283--- a/drivers/net/bonding/bond_netlink.c
46284+++ b/drivers/net/bonding/bond_netlink.c
46285@@ -542,7 +542,7 @@ nla_put_failure:
46286 return -EMSGSIZE;
46287 }
46288
46289-struct rtnl_link_ops bond_link_ops __read_mostly = {
46290+struct rtnl_link_ops bond_link_ops = {
46291 .kind = "bond",
46292 .priv_size = sizeof(struct bonding),
46293 .setup = bond_setup,
46294diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
46295index 9e7d95d..d447b88 100644
46296--- a/drivers/net/can/Kconfig
46297+++ b/drivers/net/can/Kconfig
46298@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
46299
46300 config CAN_FLEXCAN
46301 tristate "Support for Freescale FLEXCAN based chips"
46302- depends on ARM || PPC
46303+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
46304 ---help---
46305 Say Y here if you want to support for Freescale FlexCAN.
46306
46307diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
46308index 455d4c3..3353ee7 100644
46309--- a/drivers/net/ethernet/8390/ax88796.c
46310+++ b/drivers/net/ethernet/8390/ax88796.c
46311@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
46312 if (ax->plat->reg_offsets)
46313 ei_local->reg_offset = ax->plat->reg_offsets;
46314 else {
46315+ resource_size_t _mem_size = mem_size;
46316+ do_div(_mem_size, 0x18);
46317 ei_local->reg_offset = ax->reg_offsets;
46318 for (ret = 0; ret < 0x18; ret++)
46319- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
46320+ ax->reg_offsets[ret] = _mem_size * ret;
46321 }
46322
46323 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
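
The ax88796 change avoids open-coded 64-bit division: resource_size_t can be 64 bits wide even on 32-bit kernels, where a plain / would pull in a libgcc helper (__aeabi_uldivmod and friends) that the kernel does not link. do_div(n, base) from asm/div64.h divides in place and hands back the remainder; a userspace model of its contract, using a GNU C statement expression as the kernel does:

    #include <inttypes.h>
    #include <stdio.h>

    /* mirrors the kernel macro's contract: divide n in place, return
     * the remainder (evaluation caveats and all) */
    #define do_div(n, base) ({                          \
        uint32_t __rem = (uint32_t)((n) % (base));      \
        (n) /= (base);                                  \
        __rem;                                          \
    })

    int main(void)
    {
        uint64_t mem_size = 0x18 * 7 + 5;      /* stand-in resource size */
        uint32_t rem = do_div(mem_size, 0x18); /* mem_size becomes the quotient */
        printf("quotient=%" PRIu64 " remainder=%" PRIu32 "\n", mem_size, rem);
        return 0;
    }
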
46324diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
46325index a89a40f..5a8a2ac 100644
46326--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
46327+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
46328@@ -1062,7 +1062,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
46329 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
46330 {
46331 /* RX_MODE controlling object */
46332- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
46333+ bnx2x_init_rx_mode_obj(bp);
46334
46335 /* multicast configuration controlling object */
46336 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
46337diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
46338index 0fb6ff2..78fd55c 100644
46339--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
46340+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
46341@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
46342 return rc;
46343 }
46344
46345-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
46346- struct bnx2x_rx_mode_obj *o)
46347+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
46348 {
46349 if (CHIP_IS_E1x(bp)) {
46350- o->wait_comp = bnx2x_empty_rx_mode_wait;
46351- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
46352+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
46353+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
46354 } else {
46355- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
46356- o->config_rx_mode = bnx2x_set_rx_mode_e2;
46357+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
46358+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
46359 }
46360 }
46361
46362diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
46363index 00d7f21..2cddec4 100644
46364--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
46365+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
46366@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
46367
46368 /********************* RX MODE ****************/
46369
46370-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
46371- struct bnx2x_rx_mode_obj *o);
46372+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
46373
46374 /**
46375 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
46376diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
46377index 04321e5..b51cdc4 100644
46378--- a/drivers/net/ethernet/broadcom/tg3.h
46379+++ b/drivers/net/ethernet/broadcom/tg3.h
46380@@ -150,6 +150,7 @@
46381 #define CHIPREV_ID_5750_A0 0x4000
46382 #define CHIPREV_ID_5750_A1 0x4001
46383 #define CHIPREV_ID_5750_A3 0x4003
46384+#define CHIPREV_ID_5750_C1 0x4201
46385 #define CHIPREV_ID_5750_C2 0x4202
46386 #define CHIPREV_ID_5752_A0_HW 0x5000
46387 #define CHIPREV_ID_5752_A0 0x6000
46388diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
46389index 13f9636..228040f 100644
46390--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
46391+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
46392@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
46393 }
46394
46395 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
46396- bna_cb_ioceth_enable,
46397- bna_cb_ioceth_disable,
46398- bna_cb_ioceth_hbfail,
46399- bna_cb_ioceth_reset
46400+ .enable_cbfn = bna_cb_ioceth_enable,
46401+ .disable_cbfn = bna_cb_ioceth_disable,
46402+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
46403+ .reset_cbfn = bna_cb_ioceth_reset
46404 };
46405
46406 static void bna_attr_init(struct bna_ioceth *ioceth)
46407diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
46408index 8cffcdf..aadf043 100644
46409--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
46410+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
46411@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
46412 */
46413 struct l2t_skb_cb {
46414 arp_failure_handler_func arp_failure_handler;
46415-};
46416+} __no_const;
46417
46418 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
46419
46420diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
46421index 34e2488..07e2079 100644
46422--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
46423+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
46424@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
46425
46426 int i;
46427 struct adapter *ap = netdev2adap(dev);
46428- static const unsigned int *reg_ranges;
46429+ const unsigned int *reg_ranges;
46430 int arr_size = 0, buf_size = 0;
46431
46432 if (is_t4(ap->params.chip)) {
46433diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
46434index c05b66d..ed69872 100644
46435--- a/drivers/net/ethernet/dec/tulip/de4x5.c
46436+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
46437@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
46438 for (i=0; i<ETH_ALEN; i++) {
46439 tmp.addr[i] = dev->dev_addr[i];
46440 }
46441- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
46442+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
46443 break;
46444
46445 case DE4X5_SET_HWADDR: /* Set the hardware address */
46446@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
46447 spin_lock_irqsave(&lp->lock, flags);
46448 memcpy(&statbuf, &lp->pktStats, ioc->len);
46449 spin_unlock_irqrestore(&lp->lock, flags);
46450- if (copy_to_user(ioc->data, &statbuf, ioc->len))
46451+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
46452 return -EFAULT;
46453 break;
46454 }
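
Both de4x5 hunks are the same infoleak fix: ioc->len comes from userspace, while tmp.addr and statbuf are fixed-size kernel objects, so an unchecked copy_to_user() of ioc->len bytes discloses whatever sits next to them on the stack. The fix's shape, modeled in plain C with copy_out() standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    struct stats { unsigned long pkts, errs; };

    static int copy_out(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);          /* userspace stand-in */
        return 0;
    }

    static int get_stats(void *user_buf, size_t user_len)
    {
        struct stats statbuf = { 42, 0 };
        if (user_len > sizeof statbuf)  /* the added bound check */
            return -1;                  /* -EFAULT in the driver */
        return copy_out(user_buf, &statbuf, user_len);
    }

    int main(void)
    {
        struct stats out;
        printf("oversize rejected: %d\n", get_stats(&out, sizeof out + 8));
        printf("exact size ok:     %d\n", get_stats(&out, sizeof out));
        return 0;
    }
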
46455diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
46456index 36c8061..ca5e1e0 100644
46457--- a/drivers/net/ethernet/emulex/benet/be_main.c
46458+++ b/drivers/net/ethernet/emulex/benet/be_main.c
46459@@ -534,7 +534,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
46460
46461 if (wrapped)
46462 newacc += 65536;
46463- ACCESS_ONCE(*acc) = newacc;
46464+ ACCESS_ONCE_RW(*acc) = newacc;
46465 }
46466
46467 static void populate_erx_stats(struct be_adapter *adapter,
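
ACCESS_ONCE_RW, here and in the i40e/ixgbe/sfc hunks below, exists because the hardened tree gives ACCESS_ONCE a const-qualified volatile view — good enough for reads, a compile error for stores — so intentional lock-free writes must name the writable variant. A model of the pair (not quoted from grsecurity's headers):

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long base_incval;

    static void update(unsigned long v)
    {
        /* A store through ACCESS_ONCE() would not compile here: that
         * lvalue is const-qualified. Writes must name the RW variant. */
        ACCESS_ONCE_RW(base_incval) = v;
    }

    static unsigned long snapshot(void)
    {
        return ACCESS_ONCE(base_incval);   /* reads keep the const view */
    }

    int main(void)
    {
        update(7);
        return snapshot() == 7 ? 0 : 1;
    }
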
46468diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
46469index c11ecbc..13bb299 100644
46470--- a/drivers/net/ethernet/faraday/ftgmac100.c
46471+++ b/drivers/net/ethernet/faraday/ftgmac100.c
46472@@ -30,6 +30,8 @@
46473 #include <linux/netdevice.h>
46474 #include <linux/phy.h>
46475 #include <linux/platform_device.h>
46476+#include <linux/interrupt.h>
46477+#include <linux/irqreturn.h>
46478 #include <net/ip.h>
46479
46480 #include "ftgmac100.h"
46481diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
46482index 8be5b40..081bc1b 100644
46483--- a/drivers/net/ethernet/faraday/ftmac100.c
46484+++ b/drivers/net/ethernet/faraday/ftmac100.c
46485@@ -31,6 +31,8 @@
46486 #include <linux/module.h>
46487 #include <linux/netdevice.h>
46488 #include <linux/platform_device.h>
46489+#include <linux/interrupt.h>
46490+#include <linux/irqreturn.h>
46491
46492 #include "ftmac100.h"
46493
46494diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
46495index e33ec6c..f54cfe7 100644
46496--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
46497+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
46498@@ -436,7 +436,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
46499 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
46500
46501 /* Update the base adjustement value. */
46502- ACCESS_ONCE(pf->ptp_base_adj) = incval;
46503+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
46504 smp_mb(); /* Force the above update. */
46505 }
46506
46507diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
46508index 5184e2a..acb28c3 100644
46509--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
46510+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
46511@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
46512 }
46513
46514 /* update the base incval used to calculate frequency adjustment */
46515- ACCESS_ONCE(adapter->base_incval) = incval;
46516+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
46517 smp_mb();
46518
46519 /* need lock to prevent incorrect read while modifying cyclecounter */
46520diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
46521index 089b713..28d87ae 100644
46522--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
46523+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
46524@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
46525 struct __vxge_hw_fifo *fifo;
46526 struct vxge_hw_fifo_config *config;
46527 u32 txdl_size, txdl_per_memblock;
46528- struct vxge_hw_mempool_cbs fifo_mp_callback;
46529+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
46530+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
46531+ };
46532+
46533 struct __vxge_hw_virtualpath *vpath;
46534
46535 if ((vp == NULL) || (attr == NULL)) {
46536@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
46537 goto exit;
46538 }
46539
46540- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
46541-
46542 fifo->mempool =
46543 __vxge_hw_mempool_create(vpath->hldev,
46544 fifo->config->memblock_size,
46545diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
46546index 90a2dda..47e620e 100644
46547--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
46548+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
46549@@ -2088,7 +2088,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
46550 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
46551 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
46552 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
46553- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
46554+ pax_open_kernel();
46555+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
46556+ pax_close_kernel();
46557 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
46558 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
46559 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
46560diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
46561index be7d7a6..a8983f8 100644
46562--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
46563+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
46564@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
46565 case QLCNIC_NON_PRIV_FUNC:
46566 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
46567 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
46568- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
46569+ pax_open_kernel();
46570+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
46571+ pax_close_kernel();
46572 break;
46573 case QLCNIC_PRIV_FUNC:
46574 ahw->op_mode = QLCNIC_PRIV_FUNC;
46575 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
46576- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
46577+ pax_open_kernel();
46578+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
46579+ pax_close_kernel();
46580 break;
46581 case QLCNIC_MGMT_FUNC:
46582 ahw->op_mode = QLCNIC_MGMT_FUNC;
46583 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
46584- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
46585+ pax_open_kernel();
46586+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
46587+ pax_close_kernel();
46588 break;
46589 default:
46590 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
46591diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
46592index 7d4f549..3e46c89 100644
46593--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
46594+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
46595@@ -1022,6 +1022,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
46596 struct qlcnic_dcb_cee *peer;
46597 int i;
46598
46599+ memset(info, 0, sizeof(*info));
46600 *app_count = 0;
46601
46602 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
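
The added memset() is an infoleak fix of its own: info is eventually copied out to userspace, and without zeroing first, struct padding plus any field this function never assigns would carry stale kernel stack bytes along. In miniature:

    #include <string.h>

    struct dcb_app_info {            /* illustrative layout, not the driver's */
        unsigned char selector;      /* padding usually follows this field */
        unsigned int  protocol;
    };

    static void fill_info(struct dcb_app_info *info)
    {
        memset(info, 0, sizeof(*info));  /* clears fields and padding alike */
        info->selector = 1;
        info->protocol = 0x8906;
        /* anything left unset is now 0 instead of stack garbage */
    }

    int main(void)
    {
        struct dcb_app_info info;
        fill_info(&info);
        return 0;
    }
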
46603diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
46604index 7763962..c3499a7 100644
46605--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
46606+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
46607@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
46608 struct qlcnic_dump_entry *entry;
46609 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
46610 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
46611- static const struct qlcnic_dump_operations *fw_dump_ops;
46612+ const struct qlcnic_dump_operations *fw_dump_ops;
46613 struct device *dev = &adapter->pdev->dev;
46614 struct qlcnic_hardware_context *ahw;
46615 void *temp_buffer;
46616diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
46617index 3ff7bc3..366091b 100644
46618--- a/drivers/net/ethernet/realtek/r8169.c
46619+++ b/drivers/net/ethernet/realtek/r8169.c
46620@@ -758,22 +758,22 @@ struct rtl8169_private {
46621 struct mdio_ops {
46622 void (*write)(struct rtl8169_private *, int, int);
46623 int (*read)(struct rtl8169_private *, int);
46624- } mdio_ops;
46625+ } __no_const mdio_ops;
46626
46627 struct pll_power_ops {
46628 void (*down)(struct rtl8169_private *);
46629 void (*up)(struct rtl8169_private *);
46630- } pll_power_ops;
46631+ } __no_const pll_power_ops;
46632
46633 struct jumbo_ops {
46634 void (*enable)(struct rtl8169_private *);
46635 void (*disable)(struct rtl8169_private *);
46636- } jumbo_ops;
46637+ } __no_const jumbo_ops;
46638
46639 struct csi_ops {
46640 void (*write)(struct rtl8169_private *, int, int);
46641 u32 (*read)(struct rtl8169_private *, int);
46642- } csi_ops;
46643+ } __no_const csi_ops;
46644
46645 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
46646 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
46647diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
46648index d7a3682..9ce272a 100644
46649--- a/drivers/net/ethernet/sfc/ptp.c
46650+++ b/drivers/net/ethernet/sfc/ptp.c
46651@@ -825,7 +825,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
46652 ptp->start.dma_addr);
46653
46654 /* Clear flag that signals MC ready */
46655- ACCESS_ONCE(*start) = 0;
46656+ ACCESS_ONCE_RW(*start) = 0;
46657 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
46658 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
46659 EFX_BUG_ON_PARANOID(rc);
46660diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46661index 50617c5..b13724c 100644
46662--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46663+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
46664@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
46665
46666 writel(value, ioaddr + MMC_CNTRL);
46667
46668- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
46669- MMC_CNTRL, value);
46670+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
46671+// MMC_CNTRL, value);
46672 }
46673
46674 /* To mask all all interrupts.*/
46675diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
46676index 7b594ce..1f6c5708 100644
46677--- a/drivers/net/hyperv/hyperv_net.h
46678+++ b/drivers/net/hyperv/hyperv_net.h
46679@@ -100,7 +100,7 @@ struct rndis_device {
46680
46681 enum rndis_device_state state;
46682 bool link_state;
46683- atomic_t new_req_id;
46684+ atomic_unchecked_t new_req_id;
46685
46686 spinlock_t request_lock;
46687 struct list_head req_list;
46688diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
46689index b54fd25..9bd2bae 100644
46690--- a/drivers/net/hyperv/rndis_filter.c
46691+++ b/drivers/net/hyperv/rndis_filter.c
46692@@ -103,7 +103,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
46693 * template
46694 */
46695 set = &rndis_msg->msg.set_req;
46696- set->req_id = atomic_inc_return(&dev->new_req_id);
46697+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46698
46699 /* Add to the request list */
46700 spin_lock_irqsave(&dev->request_lock, flags);
46701@@ -770,7 +770,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
46702
46703 /* Setup the rndis set */
46704 halt = &request->request_msg.msg.halt_req;
46705- halt->req_id = atomic_inc_return(&dev->new_req_id);
46706+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46707
46708 /* Ignore return since this msg is optional. */
46709 rndis_filter_send_request(dev, request);
46710diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
46711index bf0d55e..82bcfbd1 100644
46712--- a/drivers/net/ieee802154/fakehard.c
46713+++ b/drivers/net/ieee802154/fakehard.c
46714@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
46715 phy->transmit_power = 0xbf;
46716
46717 dev->netdev_ops = &fake_ops;
46718- dev->ml_priv = &fake_mlme;
46719+ dev->ml_priv = (void *)&fake_mlme;
46720
46721 priv = netdev_priv(dev);
46722 priv->phy = phy;
46723diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
46724index 20bb669..9a0e17e 100644
46725--- a/drivers/net/macvlan.c
46726+++ b/drivers/net/macvlan.c
46727@@ -991,13 +991,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
46728 int macvlan_link_register(struct rtnl_link_ops *ops)
46729 {
46730 /* common fields */
46731- ops->priv_size = sizeof(struct macvlan_dev);
46732- ops->validate = macvlan_validate;
46733- ops->maxtype = IFLA_MACVLAN_MAX;
46734- ops->policy = macvlan_policy;
46735- ops->changelink = macvlan_changelink;
46736- ops->get_size = macvlan_get_size;
46737- ops->fill_info = macvlan_fill_info;
46738+ pax_open_kernel();
46739+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
46740+ *(void **)&ops->validate = macvlan_validate;
46741+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
46742+ *(const void **)&ops->policy = macvlan_policy;
46743+ *(void **)&ops->changelink = macvlan_changelink;
46744+ *(void **)&ops->get_size = macvlan_get_size;
46745+ *(void **)&ops->fill_info = macvlan_fill_info;
46746+ pax_close_kernel();
46747
46748 return rtnl_link_register(ops);
46749 };
46750@@ -1052,7 +1054,7 @@ static int macvlan_device_event(struct notifier_block *unused,
46751 return NOTIFY_DONE;
46752 }
46753
46754-static struct notifier_block macvlan_notifier_block __read_mostly = {
46755+static struct notifier_block macvlan_notifier_block = {
46756 .notifier_call = macvlan_device_event,
46757 };
46758
46759diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
46760index 3381c4f..dea5fd5 100644
46761--- a/drivers/net/macvtap.c
46762+++ b/drivers/net/macvtap.c
46763@@ -1020,7 +1020,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
46764 }
46765
46766 ret = 0;
46767- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46768+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46769 put_user(q->flags, &ifr->ifr_flags))
46770 ret = -EFAULT;
46771 macvtap_put_vlan(vlan);
46772@@ -1190,7 +1190,7 @@ static int macvtap_device_event(struct notifier_block *unused,
46773 return NOTIFY_DONE;
46774 }
46775
46776-static struct notifier_block macvtap_notifier_block __read_mostly = {
46777+static struct notifier_block macvtap_notifier_block = {
46778 .notifier_call = macvtap_device_event,
46779 };
46780
46781diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
46782index daec9b0..6428fcb 100644
46783--- a/drivers/net/phy/mdio-bitbang.c
46784+++ b/drivers/net/phy/mdio-bitbang.c
46785@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
46786 struct mdiobb_ctrl *ctrl = bus->priv;
46787
46788 module_put(ctrl->ops->owner);
46789+ mdiobus_unregister(bus);
46790 mdiobus_free(bus);
46791 }
46792 EXPORT_SYMBOL(free_mdio_bitbang);
46793diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
46794index 72ff14b..11d442d 100644
46795--- a/drivers/net/ppp/ppp_generic.c
46796+++ b/drivers/net/ppp/ppp_generic.c
46797@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46798 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
46799 struct ppp_stats stats;
46800 struct ppp_comp_stats cstats;
46801- char *vers;
46802
46803 switch (cmd) {
46804 case SIOCGPPPSTATS:
46805@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46806 break;
46807
46808 case SIOCGPPPVER:
46809- vers = PPP_VERSION;
46810- if (copy_to_user(addr, vers, strlen(vers) + 1))
46811+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
46812 break;
46813 err = 0;
46814 break;
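
The SIOCGPPPVER change trades a runtime strlen() for a compile-time constant: PPP_VERSION is a string literal, and sizeof on a string literal already counts the terminating NUL, so the two expressions copy exactly the same bytes:

    #include <assert.h>
    #include <string.h>

    #define PPP_VERSION "2.4.2"   /* a stand-in literal; any string works */

    int main(void)
    {
        assert(sizeof(PPP_VERSION) == strlen(PPP_VERSION) + 1); /* 6 == 5 + 1 */
        return 0;
    }
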
46815diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
46816index 1252d9c..80e660b 100644
46817--- a/drivers/net/slip/slhc.c
46818+++ b/drivers/net/slip/slhc.c
46819@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
46820 register struct tcphdr *thp;
46821 register struct iphdr *ip;
46822 register struct cstate *cs;
46823- int len, hdrlen;
46824+ long len, hdrlen;
46825 unsigned char *cp = icp;
46826
46827 /* We've got a compressed packet; read the change byte */
46828diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
46829index c8624a8..f0a4f6a 100644
46830--- a/drivers/net/team/team.c
46831+++ b/drivers/net/team/team.c
46832@@ -2869,7 +2869,7 @@ static int team_device_event(struct notifier_block *unused,
46833 return NOTIFY_DONE;
46834 }
46835
46836-static struct notifier_block team_notifier_block __read_mostly = {
46837+static struct notifier_block team_notifier_block = {
46838 .notifier_call = team_device_event,
46839 };
46840
46841diff --git a/drivers/net/tun.c b/drivers/net/tun.c
46842index 26f8635..c237839 100644
46843--- a/drivers/net/tun.c
46844+++ b/drivers/net/tun.c
46845@@ -1876,7 +1876,7 @@ unlock:
46846 }
46847
46848 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46849- unsigned long arg, int ifreq_len)
46850+ unsigned long arg, size_t ifreq_len)
46851 {
46852 struct tun_file *tfile = file->private_data;
46853 struct tun_struct *tun;
46854@@ -1889,6 +1889,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46855 unsigned int ifindex;
46856 int ret;
46857
46858+ if (ifreq_len > sizeof ifr)
46859+ return -EFAULT;
46860+
46861 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
46862 if (copy_from_user(&ifr, argp, ifreq_len))
46863 return -EFAULT;
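
The tun hunk is the write-direction twin of the de4x5 fix above: ifreq_len becomes size_t (so a negative int can no longer slip through conversion) and is rejected when larger than the stack struct ifreq before copy_from_user() can overrun it. Modeled:

    #include <string.h>

    struct ifreq_like { char name[16]; unsigned flags; };

    static int set_iface(const void *user_buf, size_t len)
    {
        struct ifreq_like ifr;
        if (len > sizeof ifr)            /* the added check */
            return -1;                   /* -EFAULT in the driver */
        memcpy(&ifr, user_buf, len);     /* stands in for copy_from_user() */
        return 0;
    }

    int main(void)
    {
        char buf[64] = { 0 };
        return set_iface(buf, sizeof buf) == -1 ? 0 : 1; /* oversize refused */
    }
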
46864diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
46865index 660bd5e..ac59452 100644
46866--- a/drivers/net/usb/hso.c
46867+++ b/drivers/net/usb/hso.c
46868@@ -71,7 +71,7 @@
46869 #include <asm/byteorder.h>
46870 #include <linux/serial_core.h>
46871 #include <linux/serial.h>
46872-
46873+#include <asm/local.h>
46874
46875 #define MOD_AUTHOR "Option Wireless"
46876 #define MOD_DESCRIPTION "USB High Speed Option driver"
46877@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
46878 struct urb *urb;
46879
46880 urb = serial->rx_urb[0];
46881- if (serial->port.count > 0) {
46882+ if (atomic_read(&serial->port.count) > 0) {
46883 count = put_rxbuf_data(urb, serial);
46884 if (count == -1)
46885 return;
46886@@ -1217,7 +1217,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
46887 DUMP1(urb->transfer_buffer, urb->actual_length);
46888
46889 /* Anyone listening? */
46890- if (serial->port.count == 0)
46891+ if (atomic_read(&serial->port.count) == 0)
46892 return;
46893
46894 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
46895@@ -1287,8 +1287,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46896 tty_port_tty_set(&serial->port, tty);
46897
46898 /* check for port already opened, if not set the termios */
46899- serial->port.count++;
46900- if (serial->port.count == 1) {
46901+ if (atomic_inc_return(&serial->port.count) == 1) {
46902 serial->rx_state = RX_IDLE;
46903 /* Force default termio settings */
46904 _hso_serial_set_termios(tty, NULL);
46905@@ -1300,7 +1299,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46906 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
46907 if (result) {
46908 hso_stop_serial_device(serial->parent);
46909- serial->port.count--;
46910+ atomic_dec(&serial->port.count);
46911 kref_put(&serial->parent->ref, hso_serial_ref_free);
46912 }
46913 } else {
46914@@ -1337,10 +1336,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
46915
46916 /* reset the rts and dtr */
46917 /* do the actual close */
46918- serial->port.count--;
46919+ atomic_dec(&serial->port.count);
46920
46921- if (serial->port.count <= 0) {
46922- serial->port.count = 0;
46923+ if (atomic_read(&serial->port.count) <= 0) {
46924+ atomic_set(&serial->port.count, 0);
46925 tty_port_tty_set(&serial->port, NULL);
46926 if (!usb_gone)
46927 hso_stop_serial_device(serial->parent);
46928@@ -1416,7 +1415,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
46929
46930 /* the actual setup */
46931 spin_lock_irqsave(&serial->serial_lock, flags);
46932- if (serial->port.count)
46933+ if (atomic_read(&serial->port.count))
46934 _hso_serial_set_termios(tty, old);
46935 else
46936 tty->termios = *old;
46937@@ -1885,7 +1884,7 @@ static void intr_callback(struct urb *urb)
46938 D1("Pending read interrupt on port %d\n", i);
46939 spin_lock(&serial->serial_lock);
46940 if (serial->rx_state == RX_IDLE &&
46941- serial->port.count > 0) {
46942+ atomic_read(&serial->port.count) > 0) {
46943 /* Setup and send a ctrl req read on
46944 * port i */
46945 if (!serial->rx_urb_filled[0]) {
46946@@ -3061,7 +3060,7 @@ static int hso_resume(struct usb_interface *iface)
46947 /* Start all serial ports */
46948 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
46949 if (serial_table[i] && (serial_table[i]->interface == iface)) {
46950- if (dev2ser(serial_table[i])->port.count) {
46951+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
46952 result =
46953 hso_start_serial_device(serial_table[i], GFP_NOIO);
46954 hso_kick_transmit(dev2ser(serial_table[i]));
46955diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
46956index adb12f3..48005ab 100644
46957--- a/drivers/net/usb/r8152.c
46958+++ b/drivers/net/usb/r8152.c
46959@@ -513,7 +513,7 @@ struct r8152 {
46960 void (*disable)(struct r8152 *);
46961 void (*down)(struct r8152 *);
46962 void (*unload)(struct r8152 *);
46963- } rtl_ops;
46964+ } __no_const rtl_ops;
46965
46966 int intr_interval;
46967 u32 msg_enable;
46968diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
46969index a2515887..6d13233 100644
46970--- a/drivers/net/usb/sierra_net.c
46971+++ b/drivers/net/usb/sierra_net.c
46972@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
46973 /* atomic counter partially included in MAC address to make sure 2 devices
46974 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
46975 */
46976-static atomic_t iface_counter = ATOMIC_INIT(0);
46977+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
46978
46979 /*
46980 * SYNC Timer Delay definition used to set the expiry time
46981@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
46982 dev->net->netdev_ops = &sierra_net_device_ops;
46983
46984 /* change MAC addr to include, ifacenum, and to be unique */
46985- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
46986+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
46987 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
46988
46989 /* we will have to manufacture ethernet headers, prepare template */
46990diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
46991index d091e52..568bb179 100644
46992--- a/drivers/net/vxlan.c
46993+++ b/drivers/net/vxlan.c
46994@@ -2847,7 +2847,7 @@ nla_put_failure:
46995 return -EMSGSIZE;
46996 }
46997
46998-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
46999+static struct rtnl_link_ops vxlan_link_ops = {
47000 .kind = "vxlan",
47001 .maxtype = IFLA_VXLAN_MAX,
47002 .policy = vxlan_policy,
47003@@ -2894,7 +2894,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
47004 return NOTIFY_DONE;
47005 }
47006
47007-static struct notifier_block vxlan_notifier_block __read_mostly = {
47008+static struct notifier_block vxlan_notifier_block = {
47009 .notifier_call = vxlan_lowerdev_event,
47010 };
47011
47012diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
47013index 5920c99..ff2e4a5 100644
47014--- a/drivers/net/wan/lmc/lmc_media.c
47015+++ b/drivers/net/wan/lmc/lmc_media.c
47016@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
47017 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
47018
47019 lmc_media_t lmc_ds3_media = {
47020- lmc_ds3_init, /* special media init stuff */
47021- lmc_ds3_default, /* reset to default state */
47022- lmc_ds3_set_status, /* reset status to state provided */
47023- lmc_dummy_set_1, /* set clock source */
47024- lmc_dummy_set2_1, /* set line speed */
47025- lmc_ds3_set_100ft, /* set cable length */
47026- lmc_ds3_set_scram, /* set scrambler */
47027- lmc_ds3_get_link_status, /* get link status */
47028- lmc_dummy_set_1, /* set link status */
47029- lmc_ds3_set_crc_length, /* set CRC length */
47030- lmc_dummy_set_1, /* set T1 or E1 circuit type */
47031- lmc_ds3_watchdog
47032+ .init = lmc_ds3_init, /* special media init stuff */
47033+ .defaults = lmc_ds3_default, /* reset to default state */
47034+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
47035+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
47036+ .set_speed = lmc_dummy_set2_1, /* set line speed */
47037+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
47038+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
47039+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
47040+ .set_link_status = lmc_dummy_set_1, /* set link status */
47041+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
47042+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
47043+ .watchdog = lmc_ds3_watchdog
47044 };
47045
47046 lmc_media_t lmc_hssi_media = {
47047- lmc_hssi_init, /* special media init stuff */
47048- lmc_hssi_default, /* reset to default state */
47049- lmc_hssi_set_status, /* reset status to state provided */
47050- lmc_hssi_set_clock, /* set clock source */
47051- lmc_dummy_set2_1, /* set line speed */
47052- lmc_dummy_set_1, /* set cable length */
47053- lmc_dummy_set_1, /* set scrambler */
47054- lmc_hssi_get_link_status, /* get link status */
47055- lmc_hssi_set_link_status, /* set link status */
47056- lmc_hssi_set_crc_length, /* set CRC length */
47057- lmc_dummy_set_1, /* set T1 or E1 circuit type */
47058- lmc_hssi_watchdog
47059+ .init = lmc_hssi_init, /* special media init stuff */
47060+ .defaults = lmc_hssi_default, /* reset to default state */
47061+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
47062+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
47063+ .set_speed = lmc_dummy_set2_1, /* set line speed */
47064+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
47065+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
47066+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
47067+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
47068+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
47069+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
47070+ .watchdog = lmc_hssi_watchdog
47071 };
47072
47073-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
47074- lmc_ssi_default, /* reset to default state */
47075- lmc_ssi_set_status, /* reset status to state provided */
47076- lmc_ssi_set_clock, /* set clock source */
47077- lmc_ssi_set_speed, /* set line speed */
47078- lmc_dummy_set_1, /* set cable length */
47079- lmc_dummy_set_1, /* set scrambler */
47080- lmc_ssi_get_link_status, /* get link status */
47081- lmc_ssi_set_link_status, /* set link status */
47082- lmc_ssi_set_crc_length, /* set CRC length */
47083- lmc_dummy_set_1, /* set T1 or E1 circuit type */
47084- lmc_ssi_watchdog
47085+lmc_media_t lmc_ssi_media = {
47086+ .init = lmc_ssi_init, /* special media init stuff */
47087+ .defaults = lmc_ssi_default, /* reset to default state */
47088+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
47089+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
47090+ .set_speed = lmc_ssi_set_speed, /* set line speed */
47091+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
47092+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
47093+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
47094+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
47095+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
47096+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
47097+ .watchdog = lmc_ssi_watchdog
47098 };
47099
47100 lmc_media_t lmc_t1_media = {
47101- lmc_t1_init, /* special media init stuff */
47102- lmc_t1_default, /* reset to default state */
47103- lmc_t1_set_status, /* reset status to state provided */
47104- lmc_t1_set_clock, /* set clock source */
47105- lmc_dummy_set2_1, /* set line speed */
47106- lmc_dummy_set_1, /* set cable length */
47107- lmc_dummy_set_1, /* set scrambler */
47108- lmc_t1_get_link_status, /* get link status */
47109- lmc_dummy_set_1, /* set link status */
47110- lmc_t1_set_crc_length, /* set CRC length */
47111- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
47112- lmc_t1_watchdog
47113+ .init = lmc_t1_init, /* special media init stuff */
47114+ .defaults = lmc_t1_default, /* reset to default state */
47115+ .set_status = lmc_t1_set_status, /* reset status to state provided */
47116+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
47117+ .set_speed = lmc_dummy_set2_1, /* set line speed */
47118+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
47119+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
47120+ .get_link_status = lmc_t1_get_link_status, /* get link status */
47121+ .set_link_status = lmc_dummy_set_1, /* set link status */
47122+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
47123+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
47124+ .watchdog = lmc_t1_watchdog
47125 };
47126
47127 static void
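
The lmc conversions above replace positional struct initializers with C99 designated initializers. That is groundwork for grsecurity's constification of ops tables: once every handler is bound to a named field, reordering the structure can no longer silently wire a callback into the wrong slot, and the explanatory comments survive next to the names they describe. A minimal standalone sketch of the two styles (struct and handlers invented for illustration):

    #include <stdio.h>

    struct media_ops {
            void (*init)(void);
            void (*watchdog)(void);
    };

    static void my_init(void)     { puts("init"); }
    static void my_watchdog(void) { puts("watchdog"); }

    /* Positional: miswires handlers silently if fields are ever reordered. */
    static const struct media_ops positional = { my_init, my_watchdog };

    /* Designated: each handler is bound to a named slot, order-independent. */
    static const struct media_ops designated = {
            .init     = my_init,
            .watchdog = my_watchdog,
    };

    int main(void)
    {
            positional.init();
            designated.watchdog();
            return 0;
    }
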
47128diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
47129index feacc3b..5bac0de 100644
47130--- a/drivers/net/wan/z85230.c
47131+++ b/drivers/net/wan/z85230.c
47132@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
47133
47134 struct z8530_irqhandler z8530_sync =
47135 {
47136- z8530_rx,
47137- z8530_tx,
47138- z8530_status
47139+ .rx = z8530_rx,
47140+ .tx = z8530_tx,
47141+ .status = z8530_status
47142 };
47143
47144 EXPORT_SYMBOL(z8530_sync);
47145@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
47146 }
47147
47148 static struct z8530_irqhandler z8530_dma_sync = {
47149- z8530_dma_rx,
47150- z8530_dma_tx,
47151- z8530_dma_status
47152+ .rx = z8530_dma_rx,
47153+ .tx = z8530_dma_tx,
47154+ .status = z8530_dma_status
47155 };
47156
47157 static struct z8530_irqhandler z8530_txdma_sync = {
47158- z8530_rx,
47159- z8530_dma_tx,
47160- z8530_dma_status
47161+ .rx = z8530_rx,
47162+ .tx = z8530_dma_tx,
47163+ .status = z8530_dma_status
47164 };
47165
47166 /**
47167@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
47168
47169 struct z8530_irqhandler z8530_nop=
47170 {
47171- z8530_rx_clear,
47172- z8530_tx_clear,
47173- z8530_status_clear
47174+ .rx = z8530_rx_clear,
47175+ .tx = z8530_tx_clear,
47176+ .status = z8530_status_clear
47177 };
47178
47179
47180diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
47181index 0b60295..b8bfa5b 100644
47182--- a/drivers/net/wimax/i2400m/rx.c
47183+++ b/drivers/net/wimax/i2400m/rx.c
47184@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
47185 if (i2400m->rx_roq == NULL)
47186 goto error_roq_alloc;
47187
47188- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
47189+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
47190 GFP_KERNEL);
47191 if (rd == NULL) {
47192 result = -ENOMEM;
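
The i2400m hunk only swaps kcalloc's first two arguments. kcalloc(n, size, flags) zeroes the allocation and returns NULL when n * size would overflow, and the checked multiplication is commutative, so the swap is behavior-preserving; the reorder presumably puts the element size in the slot that grsecurity's size_overflow plugin instruments (an inference, the hunk itself states no rationale). Kernel-context sketch with a stand-in element type:

    #include <linux/slab.h>

    struct roq_log_entry { unsigned long ts; };   /* stand-in, not the real type */

    static void kcalloc_order_demo(size_t n)
    {
            struct roq_log_entry *a, *b;

            a = kcalloc(n + 1, sizeof(*a), GFP_KERNEL); /* upstream order   */
            b = kcalloc(sizeof(*b), n + 1, GFP_KERNEL); /* grsecurity order */
            kfree(a);
            kfree(b);
    }
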
47193diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
47194index edf4b57..68b51c0 100644
47195--- a/drivers/net/wireless/airo.c
47196+++ b/drivers/net/wireless/airo.c
47197@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
47198 struct airo_info *ai = dev->ml_priv;
47199 int ridcode;
47200 int enabled;
47201- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
47202+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
47203 unsigned char *iobuf;
47204
47205 /* Only super-user can write RIDs */
47206diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
47207index 99b3bfa..9559372 100644
47208--- a/drivers/net/wireless/at76c50x-usb.c
47209+++ b/drivers/net/wireless/at76c50x-usb.c
47210@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
47211 }
47212
47213 /* Convert timeout from the DFU status to jiffies */
47214-static inline unsigned long at76_get_timeout(struct dfu_status *s)
47215+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
47216 {
47217 return msecs_to_jiffies((s->poll_timeout[2] << 16)
47218 | (s->poll_timeout[1] << 8)
47219diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
47220index edc57ab..ff49e0a 100644
47221--- a/drivers/net/wireless/ath/ath10k/htc.c
47222+++ b/drivers/net/wireless/ath/ath10k/htc.c
47223@@ -831,7 +831,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
47224 /* registered target arrival callback from the HIF layer */
47225 int ath10k_htc_init(struct ath10k *ar)
47226 {
47227- struct ath10k_hif_cb htc_callbacks;
47228+ static struct ath10k_hif_cb htc_callbacks = {
47229+ .rx_completion = ath10k_htc_rx_completion_handler,
47230+ .tx_completion = ath10k_htc_tx_completion_handler,
47231+ };
47232 struct ath10k_htc_ep *ep = NULL;
47233 struct ath10k_htc *htc = &ar->htc;
47234
47235@@ -841,8 +844,6 @@ int ath10k_htc_init(struct ath10k *ar)
47236 ath10k_htc_reset_endpoint_states(htc);
47237
47238 /* setup HIF layer callbacks */
47239- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
47240- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
47241 htc->ar = ar;
47242
47243 /* Get HIF default pipe for HTC message exchange */
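
ath10k_htc_init previously filled a stack-local ath10k_hif_cb at runtime; the patch turns it into a static object with designated initializers, so the callbacks are fixed at compile time, the runtime stores disappear, and the constify plugin can treat the structure as read-only data. A standalone sketch of the same transformation (names invented):

    #include <stdio.h>

    struct hif_cb {
            void (*rx_completion)(void);
            void (*tx_completion)(void);
    };

    static void rx_done(void) { puts("rx"); }
    static void tx_done(void) { puts("tx"); }

    static void init_before(void)
    {
            struct hif_cb cb;               /* writable, rebuilt on every call */

            cb.rx_completion = rx_done;
            cb.tx_completion = tx_done;
            cb.rx_completion();
    }

    static void init_after(void)
    {
            static struct hif_cb cb = {     /* one read-only-eligible instance */
                    .rx_completion = rx_done,
                    .tx_completion = tx_done,
            };

            cb.tx_completion();
    }

    int main(void)
    {
            init_before();
            init_after();
            return 0;
    }
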
47244diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
47245index 4716d33..a688310 100644
47246--- a/drivers/net/wireless/ath/ath10k/htc.h
47247+++ b/drivers/net/wireless/ath/ath10k/htc.h
47248@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
47249
47250 struct ath10k_htc_ops {
47251 void (*target_send_suspend_complete)(struct ath10k *ar);
47252-};
47253+} __no_const;
47254
47255 struct ath10k_htc_ep_ops {
47256 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
47257 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
47258 void (*ep_tx_credits)(struct ath10k *);
47259-};
47260+} __no_const;
47261
47262 /* service connection information */
47263 struct ath10k_htc_svc_conn_req {
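
The __no_const annotations above are the escape hatch for grsecurity's constify plugin: structures consisting purely of function pointers are normally forced const at compile time, but these ath10k ops are legitimately rewritten at runtime, so they are exempted. A hedged sketch of how the marker is presumably defined (the real definition lives in the patched compiler headers, not in this hunk):

    /* Sketch, assuming the constify plugin's attribute name. */
    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    struct runtime_ops {
            void (*handler)(void);
    } __no_const;   /* stays writable despite being all function pointers */
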
47264diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
47265index 741b38d..b7ae41b 100644
47266--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
47267+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
47268@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47269 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
47270 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
47271
47272- ACCESS_ONCE(ads->ds_link) = i->link;
47273- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
47274+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
47275+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
47276
47277 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
47278 ctl6 = SM(i->keytype, AR_EncrType);
47279@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47280
47281 if ((i->is_first || i->is_last) &&
47282 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
47283- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
47284+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
47285 | set11nTries(i->rates, 1)
47286 | set11nTries(i->rates, 2)
47287 | set11nTries(i->rates, 3)
47288 | (i->dur_update ? AR_DurUpdateEna : 0)
47289 | SM(0, AR_BurstDur);
47290
47291- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
47292+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
47293 | set11nRate(i->rates, 1)
47294 | set11nRate(i->rates, 2)
47295 | set11nRate(i->rates, 3);
47296 } else {
47297- ACCESS_ONCE(ads->ds_ctl2) = 0;
47298- ACCESS_ONCE(ads->ds_ctl3) = 0;
47299+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
47300+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
47301 }
47302
47303 if (!i->is_first) {
47304- ACCESS_ONCE(ads->ds_ctl0) = 0;
47305- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
47306- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
47307+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
47308+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
47309+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
47310 return;
47311 }
47312
47313@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47314 break;
47315 }
47316
47317- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
47318+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
47319 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
47320 | SM(i->txpower, AR_XmitPower)
47321 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
47322@@ -289,19 +289,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47323 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
47324 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
47325
47326- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
47327- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
47328+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
47329+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
47330
47331 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
47332 return;
47333
47334- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
47335+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
47336 | set11nPktDurRTSCTS(i->rates, 1);
47337
47338- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
47339+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
47340 | set11nPktDurRTSCTS(i->rates, 3);
47341
47342- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
47343+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
47344 | set11nRateFlags(i->rates, 1)
47345 | set11nRateFlags(i->rates, 2)
47346 | set11nRateFlags(i->rates, 3)
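
Every descriptor store in ar9002_set_txdesc switches from ACCESS_ONCE() to ACCESS_ONCE_RW(). Under these hardening changes ACCESS_ONCE() is presumably redefined with a const-qualified volatile cast so it can only read, while the _RW variant keeps the writable cast for stores such as these; the same substitution follows in ar9003_mac.c below. Sketch of the presumed pair:

    /* Presumed definitions; upstream ACCESS_ONCE(x) was
     * (*(volatile typeof(x) *)&(x)) and allowed reads and writes alike. */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read-only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writable  */

    static unsigned int reg;

    static void access_once_demo(void)
    {
            unsigned int v = ACCESS_ONCE(reg);  /* read still compiles        */
            ACCESS_ONCE_RW(reg) = v + 1;        /* write needs the RW variant */
            /* ACCESS_ONCE(reg) = 0;            -- would now fail to compile  */
    }
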
47347diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
47348index 729ffbf..49f50e3 100644
47349--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
47350+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
47351@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47352 (i->qcu << AR_TxQcuNum_S) | desc_len;
47353
47354 checksum += val;
47355- ACCESS_ONCE(ads->info) = val;
47356+ ACCESS_ONCE_RW(ads->info) = val;
47357
47358 checksum += i->link;
47359- ACCESS_ONCE(ads->link) = i->link;
47360+ ACCESS_ONCE_RW(ads->link) = i->link;
47361
47362 checksum += i->buf_addr[0];
47363- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
47364+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
47365 checksum += i->buf_addr[1];
47366- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
47367+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
47368 checksum += i->buf_addr[2];
47369- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
47370+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
47371 checksum += i->buf_addr[3];
47372- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
47373+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
47374
47375 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
47376- ACCESS_ONCE(ads->ctl3) = val;
47377+ ACCESS_ONCE_RW(ads->ctl3) = val;
47378 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
47379- ACCESS_ONCE(ads->ctl5) = val;
47380+ ACCESS_ONCE_RW(ads->ctl5) = val;
47381 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
47382- ACCESS_ONCE(ads->ctl7) = val;
47383+ ACCESS_ONCE_RW(ads->ctl7) = val;
47384 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
47385- ACCESS_ONCE(ads->ctl9) = val;
47386+ ACCESS_ONCE_RW(ads->ctl9) = val;
47387
47388 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
47389- ACCESS_ONCE(ads->ctl10) = checksum;
47390+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
47391
47392 if (i->is_first || i->is_last) {
47393- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
47394+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
47395 | set11nTries(i->rates, 1)
47396 | set11nTries(i->rates, 2)
47397 | set11nTries(i->rates, 3)
47398 | (i->dur_update ? AR_DurUpdateEna : 0)
47399 | SM(0, AR_BurstDur);
47400
47401- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
47402+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
47403 | set11nRate(i->rates, 1)
47404 | set11nRate(i->rates, 2)
47405 | set11nRate(i->rates, 3);
47406 } else {
47407- ACCESS_ONCE(ads->ctl13) = 0;
47408- ACCESS_ONCE(ads->ctl14) = 0;
47409+ ACCESS_ONCE_RW(ads->ctl13) = 0;
47410+ ACCESS_ONCE_RW(ads->ctl14) = 0;
47411 }
47412
47413 ads->ctl20 = 0;
47414@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47415
47416 ctl17 = SM(i->keytype, AR_EncrType);
47417 if (!i->is_first) {
47418- ACCESS_ONCE(ads->ctl11) = 0;
47419- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
47420- ACCESS_ONCE(ads->ctl15) = 0;
47421- ACCESS_ONCE(ads->ctl16) = 0;
47422- ACCESS_ONCE(ads->ctl17) = ctl17;
47423- ACCESS_ONCE(ads->ctl18) = 0;
47424- ACCESS_ONCE(ads->ctl19) = 0;
47425+ ACCESS_ONCE_RW(ads->ctl11) = 0;
47426+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
47427+ ACCESS_ONCE_RW(ads->ctl15) = 0;
47428+ ACCESS_ONCE_RW(ads->ctl16) = 0;
47429+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
47430+ ACCESS_ONCE_RW(ads->ctl18) = 0;
47431+ ACCESS_ONCE_RW(ads->ctl19) = 0;
47432 return;
47433 }
47434
47435- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
47436+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
47437 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
47438 | SM(i->txpower, AR_XmitPower)
47439 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
47440@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
47441 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
47442 ctl12 |= SM(val, AR_PAPRDChainMask);
47443
47444- ACCESS_ONCE(ads->ctl12) = ctl12;
47445- ACCESS_ONCE(ads->ctl17) = ctl17;
47446+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
47447+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
47448
47449- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
47450+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
47451 | set11nPktDurRTSCTS(i->rates, 1);
47452
47453- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
47454+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
47455 | set11nPktDurRTSCTS(i->rates, 3);
47456
47457- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
47458+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
47459 | set11nRateFlags(i->rates, 1)
47460 | set11nRateFlags(i->rates, 2)
47461 | set11nRateFlags(i->rates, 3)
47462 | SM(i->rtscts_rate, AR_RTSCTSRate);
47463
47464- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
47465+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
47466 }
47467
47468 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
47469diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
47470index 0acd4b5..0591c91 100644
47471--- a/drivers/net/wireless/ath/ath9k/hw.h
47472+++ b/drivers/net/wireless/ath/ath9k/hw.h
47473@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
47474
47475 /* ANI */
47476 void (*ani_cache_ini_regs)(struct ath_hw *ah);
47477-};
47478+} __no_const;
47479
47480 /**
47481 * struct ath_spec_scan - parameters for Atheros spectral scan
47482@@ -706,7 +706,7 @@ struct ath_hw_ops {
47483 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
47484 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
47485 #endif
47486-};
47487+} __no_const;
47488
47489 struct ath_nf_limits {
47490 s16 max;
47491diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
47492index 92190da..f3a4c4c 100644
47493--- a/drivers/net/wireless/b43/phy_lp.c
47494+++ b/drivers/net/wireless/b43/phy_lp.c
47495@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
47496 {
47497 struct ssb_bus *bus = dev->dev->sdev->bus;
47498
47499- static const struct b206x_channel *chandata = NULL;
47500+ const struct b206x_channel *chandata = NULL;
47501 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
47502 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
47503 u16 old_comm15, scale;
47504diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
47505index 0487461..fd9e84a 100644
47506--- a/drivers/net/wireless/iwlegacy/3945-mac.c
47507+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
47508@@ -3638,7 +3638,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
47509 */
47510 if (il3945_mod_params.disable_hw_scan) {
47511 D_INFO("Disabling hw_scan\n");
47512- il3945_mac_ops.hw_scan = NULL;
47513+ pax_open_kernel();
47514+ *(void **)&il3945_mac_ops.hw_scan = NULL;
47515+ pax_close_kernel();
47516 }
47517
47518 D_INFO("*** LOAD DRIVER ***\n");
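
il3945 clears a function pointer through a cast between pax_open_kernel() and pax_close_kernel(). This is the recurring idiom throughout the patch for the rare runtime write into an ops structure that KERNEXEC/constify otherwise keeps read-only: open lifts write protection (on x86, roughly, by toggling CR0.WP), the single field is patched through *(void **)&, and close restores protection. Kernel-context sketch with the toggles stubbed out:

    /* pax_open_kernel()/pax_close_kernel() come from the PaX patch;
     * stubbed here purely for illustration. */
    static inline void pax_open_kernel(void)  { /* e.g. clear CR0.WP */ }
    static inline void pax_close_kernel(void) { /* restore CR0.WP    */ }

    struct mac_ops {
            void (*hw_scan)(void);
    };

    static struct mac_ops il3945_mac_ops;  /* const-ified in the patched tree */

    static void disable_hw_scan(void)
    {
            pax_open_kernel();
            *(void **)&il3945_mac_ops.hw_scan = NULL; /* cast bypasses const */
            pax_close_kernel();
    }
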
47519diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
47520index d2fe259..0c4c682 100644
47521--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
47522+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
47523@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
47524 {
47525 struct iwl_priv *priv = file->private_data;
47526 char buf[64];
47527- int buf_size;
47528+ size_t buf_size;
47529 u32 offset, len;
47530
47531 memset(buf, 0, sizeof(buf));
47532@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
47533 struct iwl_priv *priv = file->private_data;
47534
47535 char buf[8];
47536- int buf_size;
47537+ size_t buf_size;
47538 u32 reset_flag;
47539
47540 memset(buf, 0, sizeof(buf));
47541@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
47542 {
47543 struct iwl_priv *priv = file->private_data;
47544 char buf[8];
47545- int buf_size;
47546+ size_t buf_size;
47547 int ht40;
47548
47549 memset(buf, 0, sizeof(buf));
47550@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
47551 {
47552 struct iwl_priv *priv = file->private_data;
47553 char buf[8];
47554- int buf_size;
47555+ size_t buf_size;
47556 int value;
47557
47558 memset(buf, 0, sizeof(buf));
47559@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
47560 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
47561 DEBUGFS_READ_FILE_OPS(current_sleep_command);
47562
47563-static const char *fmt_value = " %-30s %10u\n";
47564-static const char *fmt_hex = " %-30s 0x%02X\n";
47565-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
47566-static const char *fmt_header =
47567+static const char fmt_value[] = " %-30s %10u\n";
47568+static const char fmt_hex[] = " %-30s 0x%02X\n";
47569+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
47570+static const char fmt_header[] =
47571 "%-32s current cumulative delta max\n";
47572
47573 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
47574@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
47575 {
47576 struct iwl_priv *priv = file->private_data;
47577 char buf[8];
47578- int buf_size;
47579+ size_t buf_size;
47580 int clear;
47581
47582 memset(buf, 0, sizeof(buf));
47583@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
47584 {
47585 struct iwl_priv *priv = file->private_data;
47586 char buf[8];
47587- int buf_size;
47588+ size_t buf_size;
47589 int trace;
47590
47591 memset(buf, 0, sizeof(buf));
47592@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
47593 {
47594 struct iwl_priv *priv = file->private_data;
47595 char buf[8];
47596- int buf_size;
47597+ size_t buf_size;
47598 int missed;
47599
47600 memset(buf, 0, sizeof(buf));
47601@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
47602
47603 struct iwl_priv *priv = file->private_data;
47604 char buf[8];
47605- int buf_size;
47606+ size_t buf_size;
47607 int plcp;
47608
47609 memset(buf, 0, sizeof(buf));
47610@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
47611
47612 struct iwl_priv *priv = file->private_data;
47613 char buf[8];
47614- int buf_size;
47615+ size_t buf_size;
47616 int flush;
47617
47618 memset(buf, 0, sizeof(buf));
47619@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
47620
47621 struct iwl_priv *priv = file->private_data;
47622 char buf[8];
47623- int buf_size;
47624+ size_t buf_size;
47625 int rts;
47626
47627 if (!priv->cfg->ht_params)
47628@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
47629 {
47630 struct iwl_priv *priv = file->private_data;
47631 char buf[8];
47632- int buf_size;
47633+ size_t buf_size;
47634
47635 memset(buf, 0, sizeof(buf));
47636 buf_size = min(count, sizeof(buf) - 1);
47637@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
47638 struct iwl_priv *priv = file->private_data;
47639 u32 event_log_flag;
47640 char buf[8];
47641- int buf_size;
47642+ size_t buf_size;
47643
47644 /* check that the interface is up */
47645 if (!iwl_is_ready(priv))
47646@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
47647 struct iwl_priv *priv = file->private_data;
47648 char buf[8];
47649 u32 calib_disabled;
47650- int buf_size;
47651+ size_t buf_size;
47652
47653 memset(buf, 0, sizeof(buf));
47654 buf_size = min(count, sizeof(buf) - 1);
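
All of these debugfs write handlers compute buf_size = min(count, sizeof(buf) - 1), where both operands are size_t. Declaring buf_size as size_t keeps that arithmetic unsigned end to end instead of implicitly narrowing to int; the narrowing happens to be harmless here because min() caps the value at 7, but the unsigned type is correct by construction and is what the size_overflow instrumentation expects. The same change repeats in pcie/trans.c below. Standalone illustration:

    #include <stddef.h>
    #include <stdio.h>

    static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
            char buf[8];
            size_t count = (size_t)1 << 33;       /* plausible on 64-bit */

            buf[0] = '\0';
            int narrow    = min_size(count, sizeof(buf) - 1); /* implicit narrowing */
            size_t proper = min_size(count, sizeof(buf) - 1); /* stays size_t       */

            /* Both print 7, but only because min() already bounded the value. */
            printf("%d %zu\n", narrow, proper);
            return 0;
    }
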
47655diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
47656index ea7e70c..bc0c45f 100644
47657--- a/drivers/net/wireless/iwlwifi/dvm/main.c
47658+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
47659@@ -1127,7 +1127,7 @@ static void iwl_option_config(struct iwl_priv *priv)
47660 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
47661 {
47662 struct iwl_nvm_data *data = priv->nvm_data;
47663- char *debug_msg;
47664+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
47665
47666 if (data->sku_cap_11n_enable &&
47667 !priv->cfg->ht_params) {
47668@@ -1141,7 +1141,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
47669 return -EINVAL;
47670 }
47671
47672- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
47673 IWL_DEBUG_INFO(priv, debug_msg,
47674 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
47675 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
47676diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
47677index 8d42fd9..d923d65 100644
47678--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
47679+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
47680@@ -1365,7 +1365,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
47681 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
47682
47683 char buf[8];
47684- int buf_size;
47685+ size_t buf_size;
47686 u32 reset_flag;
47687
47688 memset(buf, 0, sizeof(buf));
47689@@ -1386,7 +1386,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
47690 {
47691 struct iwl_trans *trans = file->private_data;
47692 char buf[8];
47693- int buf_size;
47694+ size_t buf_size;
47695 int csr;
47696
47697 memset(buf, 0, sizeof(buf));
47698diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
47699index 69d4c31..bd0b316 100644
47700--- a/drivers/net/wireless/mac80211_hwsim.c
47701+++ b/drivers/net/wireless/mac80211_hwsim.c
47702@@ -2541,20 +2541,20 @@ static int __init init_mac80211_hwsim(void)
47703 if (channels < 1)
47704 return -EINVAL;
47705
47706- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
47707- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
47708- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
47709- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
47710- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
47711- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
47712- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
47713- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
47714- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
47715- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
47716- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
47717- mac80211_hwsim_assign_vif_chanctx;
47718- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
47719- mac80211_hwsim_unassign_vif_chanctx;
47720+ pax_open_kernel();
47721+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
47722+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
47723+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
47724+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
47725+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
47726+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
47727+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
47728+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
47729+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
47730+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
47731+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
47732+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
47733+ pax_close_kernel();
47734
47735 spin_lock_init(&hwsim_radio_lock);
47736 INIT_LIST_HEAD(&hwsim_radios);
47737diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
47738index 5028557..91cf394 100644
47739--- a/drivers/net/wireless/rndis_wlan.c
47740+++ b/drivers/net/wireless/rndis_wlan.c
47741@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
47742
47743 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
47744
47745- if (rts_threshold < 0 || rts_threshold > 2347)
47746+ if (rts_threshold > 2347)
47747 rts_threshold = 2347;
47748
47749 tmp = cpu_to_le32(rts_threshold);
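
rts_threshold is a u32, so the removed rts_threshold < 0 test was tautologically false; GCC's -Wtype-limits flags exactly this, and the clamp to 2347 is all that ever took effect. The same dead-comparison removal appears again in drivers/pnp/resource.c below. Standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rts_threshold = 3000;

            /* (rts_threshold < 0) is always false for an unsigned type;
             * build with -Wall -Wextra (or -Wtype-limits) to see the warning. */
            if (rts_threshold > 2347)
                    rts_threshold = 2347;

            printf("%u\n", rts_threshold);
            return 0;
    }
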
47750diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
47751index e3b885d..7a7de2f 100644
47752--- a/drivers/net/wireless/rt2x00/rt2x00.h
47753+++ b/drivers/net/wireless/rt2x00/rt2x00.h
47754@@ -375,7 +375,7 @@ struct rt2x00_intf {
47755 * for hardware which doesn't support hardware
47756 * sequence counting.
47757 */
47758- atomic_t seqno;
47759+ atomic_unchecked_t seqno;
47760 };
47761
47762 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
47763diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
47764index 5642ccc..01f03eb 100644
47765--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
47766+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
47767@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
47768 * sequence counter given by mac80211.
47769 */
47770 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
47771- seqno = atomic_add_return(0x10, &intf->seqno);
47772+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
47773 else
47774- seqno = atomic_read(&intf->seqno);
47775+ seqno = atomic_read_unchecked(&intf->seqno);
47776
47777 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
47778 hdr->seq_ctrl |= cpu_to_le16(seqno);
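
The rt2x00 seqno is an 802.11 sequence counter that is expected to wrap. PaX's REFCOUNT hardening makes ordinary atomic_t operations trap on overflow to catch reference-count bugs, so counters whose wraparound is intentional are migrated to the atomic_unchecked_t variants, which keep the old semantics; the oprofile statistics later in this patch get the same treatment. Hedged sketch of the presumed split (illustration only, the stub is not actually atomic):

    typedef struct { int counter; } atomic_t;           /* ops trap on overflow */
    typedef struct { int counter; } atomic_unchecked_t; /* ops may wrap freely  */

    static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
            return v->counter += i;  /* no overflow check, and not atomic here */
    }
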
47779diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
47780index e2b3d9c..67a5184 100644
47781--- a/drivers/net/wireless/ti/wl1251/sdio.c
47782+++ b/drivers/net/wireless/ti/wl1251/sdio.c
47783@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
47784
47785 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
47786
47787- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47788- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47789+ pax_open_kernel();
47790+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47791+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47792+ pax_close_kernel();
47793
47794 wl1251_info("using dedicated interrupt line");
47795 } else {
47796- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47797- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47798+ pax_open_kernel();
47799+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47800+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47801+ pax_close_kernel();
47802
47803 wl1251_info("using SDIO interrupt");
47804 }
47805diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
47806index be7129b..4161356 100644
47807--- a/drivers/net/wireless/ti/wl12xx/main.c
47808+++ b/drivers/net/wireless/ti/wl12xx/main.c
47809@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47810 sizeof(wl->conf.mem));
47811
47812 /* read data preparation is only needed by wl127x */
47813- wl->ops->prepare_read = wl127x_prepare_read;
47814+ pax_open_kernel();
47815+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47816+ pax_close_kernel();
47817
47818 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47819 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47820@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47821 sizeof(wl->conf.mem));
47822
47823 /* read data preparation is only needed by wl127x */
47824- wl->ops->prepare_read = wl127x_prepare_read;
47825+ pax_open_kernel();
47826+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47827+ pax_close_kernel();
47828
47829 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47830 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47831diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
47832index ec37b16..7e34d66 100644
47833--- a/drivers/net/wireless/ti/wl18xx/main.c
47834+++ b/drivers/net/wireless/ti/wl18xx/main.c
47835@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl)
47836 }
47837
47838 if (!checksum_param) {
47839- wl18xx_ops.set_rx_csum = NULL;
47840- wl18xx_ops.init_vif = NULL;
47841+ pax_open_kernel();
47842+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
47843+ *(void **)&wl18xx_ops.init_vif = NULL;
47844+ pax_close_kernel();
47845 }
47846
47847 /* Enable 11a Band only if we have 5G antennas */
47848diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
47849index a912dc0..a8225ba 100644
47850--- a/drivers/net/wireless/zd1211rw/zd_usb.c
47851+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
47852@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
47853 {
47854 struct zd_usb *usb = urb->context;
47855 struct zd_usb_interrupt *intr = &usb->intr;
47856- int len;
47857+ unsigned int len;
47858 u16 int_num;
47859
47860 ZD_ASSERT(in_interrupt());
47861diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
47862index 683671a..4519fc2 100644
47863--- a/drivers/nfc/nfcwilink.c
47864+++ b/drivers/nfc/nfcwilink.c
47865@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
47866
47867 static int nfcwilink_probe(struct platform_device *pdev)
47868 {
47869- static struct nfcwilink *drv;
47870+ struct nfcwilink *drv;
47871 int rc;
47872 __u32 protocols;
47873
47874diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
47875index d93b2b6..ae50401 100644
47876--- a/drivers/oprofile/buffer_sync.c
47877+++ b/drivers/oprofile/buffer_sync.c
47878@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
47879 if (cookie == NO_COOKIE)
47880 offset = pc;
47881 if (cookie == INVALID_COOKIE) {
47882- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47883+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47884 offset = pc;
47885 }
47886 if (cookie != last_cookie) {
47887@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
47888 /* add userspace sample */
47889
47890 if (!mm) {
47891- atomic_inc(&oprofile_stats.sample_lost_no_mm);
47892+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
47893 return 0;
47894 }
47895
47896 cookie = lookup_dcookie(mm, s->eip, &offset);
47897
47898 if (cookie == INVALID_COOKIE) {
47899- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47900+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47901 return 0;
47902 }
47903
47904@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
47905 /* ignore backtraces if failed to add a sample */
47906 if (state == sb_bt_start) {
47907 state = sb_bt_ignore;
47908- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
47909+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
47910 }
47911 }
47912 release_mm(mm);
47913diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
47914index c0cc4e7..44d4e54 100644
47915--- a/drivers/oprofile/event_buffer.c
47916+++ b/drivers/oprofile/event_buffer.c
47917@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
47918 }
47919
47920 if (buffer_pos == buffer_size) {
47921- atomic_inc(&oprofile_stats.event_lost_overflow);
47922+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
47923 return;
47924 }
47925
47926diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
47927index ed2c3ec..deda85a 100644
47928--- a/drivers/oprofile/oprof.c
47929+++ b/drivers/oprofile/oprof.c
47930@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
47931 if (oprofile_ops.switch_events())
47932 return;
47933
47934- atomic_inc(&oprofile_stats.multiplex_counter);
47935+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
47936 start_switch_worker();
47937 }
47938
47939diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
47940index ee2cfce..7f8f699 100644
47941--- a/drivers/oprofile/oprofile_files.c
47942+++ b/drivers/oprofile/oprofile_files.c
47943@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
47944
47945 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
47946
47947-static ssize_t timeout_read(struct file *file, char __user *buf,
47948+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
47949 size_t count, loff_t *offset)
47950 {
47951 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
47952diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
47953index 59659ce..6c860a0 100644
47954--- a/drivers/oprofile/oprofile_stats.c
47955+++ b/drivers/oprofile/oprofile_stats.c
47956@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
47957 cpu_buf->sample_invalid_eip = 0;
47958 }
47959
47960- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
47961- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
47962- atomic_set(&oprofile_stats.event_lost_overflow, 0);
47963- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
47964- atomic_set(&oprofile_stats.multiplex_counter, 0);
47965+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
47966+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
47967+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
47968+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
47969+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
47970 }
47971
47972
47973diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
47974index 1fc622b..8c48fc3 100644
47975--- a/drivers/oprofile/oprofile_stats.h
47976+++ b/drivers/oprofile/oprofile_stats.h
47977@@ -13,11 +13,11 @@
47978 #include <linux/atomic.h>
47979
47980 struct oprofile_stat_struct {
47981- atomic_t sample_lost_no_mm;
47982- atomic_t sample_lost_no_mapping;
47983- atomic_t bt_lost_no_mapping;
47984- atomic_t event_lost_overflow;
47985- atomic_t multiplex_counter;
47986+ atomic_unchecked_t sample_lost_no_mm;
47987+ atomic_unchecked_t sample_lost_no_mapping;
47988+ atomic_unchecked_t bt_lost_no_mapping;
47989+ atomic_unchecked_t event_lost_overflow;
47990+ atomic_unchecked_t multiplex_counter;
47991 };
47992
47993 extern struct oprofile_stat_struct oprofile_stats;
47994diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
47995index 3f49345..c750d0b 100644
47996--- a/drivers/oprofile/oprofilefs.c
47997+++ b/drivers/oprofile/oprofilefs.c
47998@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
47999
48000 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
48001 {
48002- atomic_t *val = file->private_data;
48003- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
48004+ atomic_unchecked_t *val = file->private_data;
48005+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
48006 }
48007
48008
48009@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
48010
48011
48012 int oprofilefs_create_ro_atomic(struct dentry *root,
48013- char const *name, atomic_t *val)
48014+ char const *name, atomic_unchecked_t *val)
48015 {
48016 return __oprofilefs_create_file(root, name,
48017 &atomic_ro_fops, 0444, val);
48018diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
48019index 61be1d9..dec05d7 100644
48020--- a/drivers/oprofile/timer_int.c
48021+++ b/drivers/oprofile/timer_int.c
48022@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
48023 return NOTIFY_OK;
48024 }
48025
48026-static struct notifier_block __refdata oprofile_cpu_notifier = {
48027+static struct notifier_block oprofile_cpu_notifier = {
48028 .notifier_call = oprofile_cpu_notify,
48029 };
48030
48031diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
48032index 92ed045..62d39bd7 100644
48033--- a/drivers/parport/procfs.c
48034+++ b/drivers/parport/procfs.c
48035@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
48036
48037 *ppos += len;
48038
48039- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
48040+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
48041 }
48042
48043 #ifdef CONFIG_PARPORT_1284
48044@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
48045
48046 *ppos += len;
48047
48048- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
48049+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
48050 }
48051 #endif /* IEEE1284.3 support. */
48052
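The parport procfs hunks put a len > sizeof buffer guard in front of copy_to_user(). len is computed by formatting into a small stack buffer, so it should already be in range; the added check is defense in depth that turns any miscalculated length into a clean -EFAULT instead of an over-read of adjacent stack memory. Kernel-context sketch of the shape:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static int report_to_user(char __user *result, const char *buffer,
                              size_t len, size_t bufsz)
    {
            if (len > bufsz)        /* don't trust a computed length */
                    return -EFAULT;
            return copy_to_user(result, buffer, len) ? -EFAULT : 0;
    }
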
48053diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
48054index 8dcccff..35d701d 100644
48055--- a/drivers/pci/hotplug/acpiphp_ibm.c
48056+++ b/drivers/pci/hotplug/acpiphp_ibm.c
48057@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
48058 goto init_cleanup;
48059 }
48060
48061- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
48062+ pax_open_kernel();
48063+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
48064+ pax_close_kernel();
48065 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
48066
48067 return retval;
48068diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
48069index 7536eef..52dc8fa 100644
48070--- a/drivers/pci/hotplug/cpcihp_generic.c
48071+++ b/drivers/pci/hotplug/cpcihp_generic.c
48072@@ -73,7 +73,6 @@ static u16 port;
48073 static unsigned int enum_bit;
48074 static u8 enum_mask;
48075
48076-static struct cpci_hp_controller_ops generic_hpc_ops;
48077 static struct cpci_hp_controller generic_hpc;
48078
48079 static int __init validate_parameters(void)
48080@@ -139,6 +138,10 @@ static int query_enum(void)
48081 return ((value & enum_mask) == enum_mask);
48082 }
48083
48084+static struct cpci_hp_controller_ops generic_hpc_ops = {
48085+ .query_enum = query_enum,
48086+};
48087+
48088 static int __init cpcihp_generic_init(void)
48089 {
48090 int status;
48091@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
48092 pci_dev_put(dev);
48093
48094 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
48095- generic_hpc_ops.query_enum = query_enum;
48096 generic_hpc.ops = &generic_hpc_ops;
48097
48098 status = cpci_hp_register_controller(&generic_hpc);
48099diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
48100index e8c4a7c..7046f5c 100644
48101--- a/drivers/pci/hotplug/cpcihp_zt5550.c
48102+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
48103@@ -59,7 +59,6 @@
48104 /* local variables */
48105 static bool debug;
48106 static bool poll;
48107-static struct cpci_hp_controller_ops zt5550_hpc_ops;
48108 static struct cpci_hp_controller zt5550_hpc;
48109
48110 /* Primary cPCI bus bridge device */
48111@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
48112 return 0;
48113 }
48114
48115+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
48116+ .query_enum = zt5550_hc_query_enum,
48117+};
48118+
48119 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
48120 {
48121 int status;
48122@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
48123 dbg("returned from zt5550_hc_config");
48124
48125 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
48126- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
48127 zt5550_hpc.ops = &zt5550_hpc_ops;
48128 if(!poll) {
48129 zt5550_hpc.irq = hc_dev->irq;
48130 zt5550_hpc.irq_flags = IRQF_SHARED;
48131 zt5550_hpc.dev_id = hc_dev;
48132
48133- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
48134- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
48135- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
48136+ pax_open_kernel();
48137+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
48138+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
48139+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
48140+ pax_close_kernel();
48141 } else {
48142 info("using ENUM# polling mode");
48143 }
48144diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
48145index 76ba8a1..20ca857 100644
48146--- a/drivers/pci/hotplug/cpqphp_nvram.c
48147+++ b/drivers/pci/hotplug/cpqphp_nvram.c
48148@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
48149
48150 void compaq_nvram_init (void __iomem *rom_start)
48151 {
48152+
48153+#ifndef CONFIG_PAX_KERNEXEC
48154 if (rom_start) {
48155 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
48156 }
48157+#endif
48158+
48159 dbg("int15 entry = %p\n", compaq_int15_entry_point);
48160
48161 /* initialize our int15 lock */
48162diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
48163index cfa92a9..29539c5 100644
48164--- a/drivers/pci/hotplug/pci_hotplug_core.c
48165+++ b/drivers/pci/hotplug/pci_hotplug_core.c
48166@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
48167 return -EINVAL;
48168 }
48169
48170- slot->ops->owner = owner;
48171- slot->ops->mod_name = mod_name;
48172+ pax_open_kernel();
48173+ *(struct module **)&slot->ops->owner = owner;
48174+ *(const char **)&slot->ops->mod_name = mod_name;
48175+ pax_close_kernel();
48176
48177 mutex_lock(&pci_hp_mutex);
48178 /*
48179diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
48180index 53b58de..4479896 100644
48181--- a/drivers/pci/hotplug/pciehp_core.c
48182+++ b/drivers/pci/hotplug/pciehp_core.c
48183@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
48184 struct slot *slot = ctrl->slot;
48185 struct hotplug_slot *hotplug = NULL;
48186 struct hotplug_slot_info *info = NULL;
48187- struct hotplug_slot_ops *ops = NULL;
48188+ hotplug_slot_ops_no_const *ops = NULL;
48189 char name[SLOT_NAME_SIZE];
48190 int retval = -ENOMEM;
48191
48192diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
48193index 955ab79..d1df9c7 100644
48194--- a/drivers/pci/msi.c
48195+++ b/drivers/pci/msi.c
48196@@ -524,8 +524,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
48197 {
48198 struct attribute **msi_attrs;
48199 struct attribute *msi_attr;
48200- struct device_attribute *msi_dev_attr;
48201- struct attribute_group *msi_irq_group;
48202+ device_attribute_no_const *msi_dev_attr;
48203+ attribute_group_no_const *msi_irq_group;
48204 const struct attribute_group **msi_irq_groups;
48205 struct msi_desc *entry;
48206 int ret = -ENOMEM;
48207@@ -589,7 +589,7 @@ error_attrs:
48208 count = 0;
48209 msi_attr = msi_attrs[count];
48210 while (msi_attr) {
48211- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
48212+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
48213 kfree(msi_attr->name);
48214 kfree(msi_dev_attr);
48215 ++count;
48216diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
48217index 276ef9c..1d33a36 100644
48218--- a/drivers/pci/pci-sysfs.c
48219+++ b/drivers/pci/pci-sysfs.c
48220@@ -1112,7 +1112,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
48221 {
48222 /* allocate attribute structure, piggyback attribute name */
48223 int name_len = write_combine ? 13 : 10;
48224- struct bin_attribute *res_attr;
48225+ bin_attribute_no_const *res_attr;
48226 int retval;
48227
48228 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
48229@@ -1297,7 +1297,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
48230 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
48231 {
48232 int retval;
48233- struct bin_attribute *attr;
48234+ bin_attribute_no_const *attr;
48235
48236 /* If the device has VPD, try to expose it in sysfs. */
48237 if (dev->vpd) {
48238@@ -1344,7 +1344,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
48239 {
48240 int retval;
48241 int rom_size = 0;
48242- struct bin_attribute *attr;
48243+ bin_attribute_no_const *attr;
48244
48245 if (!sysfs_initialized)
48246 return -EACCES;
48247diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
48248index 4df38df..b6bb7fe 100644
48249--- a/drivers/pci/pci.h
48250+++ b/drivers/pci/pci.h
48251@@ -93,7 +93,7 @@ struct pci_vpd_ops {
48252 struct pci_vpd {
48253 unsigned int len;
48254 const struct pci_vpd_ops *ops;
48255- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
48256+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
48257 };
48258
48259 int pci_vpd_pci22_init(struct pci_dev *dev);
48260diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
48261index e1e7026..d28dd33 100644
48262--- a/drivers/pci/pcie/aspm.c
48263+++ b/drivers/pci/pcie/aspm.c
48264@@ -27,9 +27,9 @@
48265 #define MODULE_PARAM_PREFIX "pcie_aspm."
48266
48267 /* Note: those are not register definitions */
48268-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
48269-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
48270-#define ASPM_STATE_L1 (4) /* L1 state */
48271+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
48272+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
48273+#define ASPM_STATE_L1 (4U) /* L1 state */
48274 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
48275 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
48276
48277diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
48278index 6e34498..9911975 100644
48279--- a/drivers/pci/probe.c
48280+++ b/drivers/pci/probe.c
48281@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
48282 struct pci_bus_region region, inverted_region;
48283 bool bar_too_big = false, bar_disabled = false;
48284
48285- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
48286+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
48287
48288 /* No printks while decoding is disabled! */
48289 if (!dev->mmio_always_on) {
48290diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
48291index 46d1378..30e452b 100644
48292--- a/drivers/pci/proc.c
48293+++ b/drivers/pci/proc.c
48294@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
48295 static int __init pci_proc_init(void)
48296 {
48297 struct pci_dev *dev = NULL;
48298+
48299+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48300+#ifdef CONFIG_GRKERNSEC_PROC_USER
48301+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
48302+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48303+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48304+#endif
48305+#else
48306 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
48307+#endif
48308 proc_create("devices", 0, proc_bus_pci_dir,
48309 &proc_bus_pci_dev_operations);
48310 proc_initialized = 1;
48311diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
48312index 7f3aad0..7d604bb 100644
48313--- a/drivers/platform/chrome/chromeos_laptop.c
48314+++ b/drivers/platform/chrome/chromeos_laptop.c
48315@@ -406,7 +406,7 @@ static struct chromeos_laptop cr48 = {
48316 .callback = chromeos_laptop_dmi_matched, \
48317 .driver_data = (void *)&board_
48318
48319-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
48320+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
48321 {
48322 .ident = "Samsung Series 5 550",
48323 .matches = {
48324diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
48325index c5e082f..d6307a0 100644
48326--- a/drivers/platform/x86/asus-wmi.c
48327+++ b/drivers/platform/x86/asus-wmi.c
48328@@ -1595,6 +1595,10 @@ static int show_dsts(struct seq_file *m, void *data)
48329 int err;
48330 u32 retval = -1;
48331
48332+#ifdef CONFIG_GRKERNSEC_KMEM
48333+ return -EPERM;
48334+#endif
48335+
48336 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
48337
48338 if (err < 0)
48339@@ -1611,6 +1615,10 @@ static int show_devs(struct seq_file *m, void *data)
48340 int err;
48341 u32 retval = -1;
48342
48343+#ifdef CONFIG_GRKERNSEC_KMEM
48344+ return -EPERM;
48345+#endif
48346+
48347 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
48348 &retval);
48349
48350@@ -1635,6 +1643,10 @@ static int show_call(struct seq_file *m, void *data)
48351 union acpi_object *obj;
48352 acpi_status status;
48353
48354+#ifdef CONFIG_GRKERNSEC_KMEM
48355+ return -EPERM;
48356+#endif
48357+
48358 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
48359 1, asus->debug.method_id,
48360 &input, &output);
48361diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
48362index 62f8030..c7f2a45 100644
48363--- a/drivers/platform/x86/msi-laptop.c
48364+++ b/drivers/platform/x86/msi-laptop.c
48365@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
48366
48367 if (!quirks->ec_read_only) {
48368 /* allow userland write sysfs file */
48369- dev_attr_bluetooth.store = store_bluetooth;
48370- dev_attr_wlan.store = store_wlan;
48371- dev_attr_threeg.store = store_threeg;
48372- dev_attr_bluetooth.attr.mode |= S_IWUSR;
48373- dev_attr_wlan.attr.mode |= S_IWUSR;
48374- dev_attr_threeg.attr.mode |= S_IWUSR;
48375+ pax_open_kernel();
48376+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
48377+ *(void **)&dev_attr_wlan.store = store_wlan;
48378+ *(void **)&dev_attr_threeg.store = store_threeg;
48379+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
48380+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
48381+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
48382+ pax_close_kernel();
48383 }
48384
48385 /* disable hardware control by fn key */
48386diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
48387index 70222f2..8c8ce66 100644
48388--- a/drivers/platform/x86/msi-wmi.c
48389+++ b/drivers/platform/x86/msi-wmi.c
48390@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
48391 static void msi_wmi_notify(u32 value, void *context)
48392 {
48393 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
48394- static struct key_entry *key;
48395+ struct key_entry *key;
48396 union acpi_object *obj;
48397 acpi_status status;
48398
48399diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
48400index 8f8551a..3ace3ca 100644
48401--- a/drivers/platform/x86/sony-laptop.c
48402+++ b/drivers/platform/x86/sony-laptop.c
48403@@ -2451,7 +2451,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
48404 }
48405
48406 /* High speed charging function */
48407-static struct device_attribute *hsc_handle;
48408+static device_attribute_no_const *hsc_handle;
48409
48410 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
48411 struct device_attribute *attr,
48412diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
48413index e2a91c8..986cc9f 100644
48414--- a/drivers/platform/x86/thinkpad_acpi.c
48415+++ b/drivers/platform/x86/thinkpad_acpi.c
48416@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
48417 return 0;
48418 }
48419
48420-void static hotkey_mask_warn_incomplete_mask(void)
48421+static void hotkey_mask_warn_incomplete_mask(void)
48422 {
48423 /* log only what the user can fix... */
48424 const u32 wantedmask = hotkey_driver_mask &
48425@@ -2321,11 +2321,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
48426 }
48427 }
48428
48429-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48430- struct tp_nvram_state *newn,
48431- const u32 event_mask)
48432-{
48433-
48434 #define TPACPI_COMPARE_KEY(__scancode, __member) \
48435 do { \
48436 if ((event_mask & (1 << __scancode)) && \
48437@@ -2339,36 +2334,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48438 tpacpi_hotkey_send_key(__scancode); \
48439 } while (0)
48440
48441- void issue_volchange(const unsigned int oldvol,
48442- const unsigned int newvol)
48443- {
48444- unsigned int i = oldvol;
48445+static void issue_volchange(const unsigned int oldvol,
48446+ const unsigned int newvol,
48447+ const u32 event_mask)
48448+{
48449+ unsigned int i = oldvol;
48450
48451- while (i > newvol) {
48452- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
48453- i--;
48454- }
48455- while (i < newvol) {
48456- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
48457- i++;
48458- }
48459+ while (i > newvol) {
48460+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
48461+ i--;
48462 }
48463+ while (i < newvol) {
48464+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
48465+ i++;
48466+ }
48467+}
48468
48469- void issue_brightnesschange(const unsigned int oldbrt,
48470- const unsigned int newbrt)
48471- {
48472- unsigned int i = oldbrt;
48473+static void issue_brightnesschange(const unsigned int oldbrt,
48474+ const unsigned int newbrt,
48475+ const u32 event_mask)
48476+{
48477+ unsigned int i = oldbrt;
48478
48479- while (i > newbrt) {
48480- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
48481- i--;
48482- }
48483- while (i < newbrt) {
48484- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
48485- i++;
48486- }
48487+ while (i > newbrt) {
48488+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
48489+ i--;
48490+ }
48491+ while (i < newbrt) {
48492+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
48493+ i++;
48494 }
48495+}
48496
48497+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48498+ struct tp_nvram_state *newn,
48499+ const u32 event_mask)
48500+{
48501 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
48502 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
48503 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
48504@@ -2402,7 +2403,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48505 oldn->volume_level != newn->volume_level) {
48506 /* recently muted, or repeated mute keypress, or
48507 * multiple presses ending in mute */
48508- issue_volchange(oldn->volume_level, newn->volume_level);
48509+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
48510 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
48511 }
48512 } else {
48513@@ -2412,7 +2413,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48514 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
48515 }
48516 if (oldn->volume_level != newn->volume_level) {
48517- issue_volchange(oldn->volume_level, newn->volume_level);
48518+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
48519 } else if (oldn->volume_toggle != newn->volume_toggle) {
48520 /* repeated vol up/down keypress at end of scale ? */
48521 if (newn->volume_level == 0)
48522@@ -2425,7 +2426,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48523 /* handle brightness */
48524 if (oldn->brightness_level != newn->brightness_level) {
48525 issue_brightnesschange(oldn->brightness_level,
48526- newn->brightness_level);
48527+ newn->brightness_level,
48528+ event_mask);
48529 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
48530 /* repeated key presses that didn't change state */
48531 if (newn->brightness_level == 0)
48532@@ -2434,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
48533 && !tp_features.bright_unkfw)
48534 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
48535 }
48536+}
48537
48538 #undef TPACPI_COMPARE_KEY
48539 #undef TPACPI_MAY_SEND_KEY
48540-}
48541
48542 /*
48543 * Polling driver
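
The thinkpad_acpi hunk hoists issue_volchange() and issue_brightnesschange() out of hotkey_compare_and_issue_event(), where they were GCC nested functions capturing event_mask from the enclosing scope. Nested functions are a GNU extension that can require executable stack trampolines and sit poorly with function-pointer hardening, so the patch converts them to ordinary static functions and passes event_mask explicitly. Standalone sketch of the transformation (TPACPI_MAY_SEND_KEY reduced to a print):

    #include <stdio.h>

    /* After: an ordinary static function; the formerly captured variable
     * arrives as an explicit parameter. */
    static void issue_volchange(unsigned int oldvol, unsigned int newvol,
                                unsigned int event_mask)
    {
            for (unsigned int i = oldvol; i > newvol; i--)
                    printf("VOLUMEDOWN (mask %#x)\n", event_mask);
            for (unsigned int i = oldvol; i < newvol; i++)
                    printf("VOLUMEUP (mask %#x)\n", event_mask);
    }

    int main(void)
    {
            issue_volchange(5, 2, 0x10);
            return 0;
    }
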
48544diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
48545index 769d265..a3a05ca 100644
48546--- a/drivers/pnp/pnpbios/bioscalls.c
48547+++ b/drivers/pnp/pnpbios/bioscalls.c
48548@@ -58,7 +58,7 @@ do { \
48549 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
48550 } while(0)
48551
48552-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
48553+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
48554 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
48555
48556 /*
48557@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
48558
48559 cpu = get_cpu();
48560 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
48561+
48562+ pax_open_kernel();
48563 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
48564+ pax_close_kernel();
48565
48566 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
48567 spin_lock_irqsave(&pnp_bios_lock, flags);
48568@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
48569 :"memory");
48570 spin_unlock_irqrestore(&pnp_bios_lock, flags);
48571
48572+ pax_open_kernel();
48573 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
48574+ pax_close_kernel();
48575+
48576 put_cpu();
48577
48578 /* If we get here and this is set then the PnP BIOS faulted on us. */
48579@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
48580 return status;
48581 }
48582
48583-void pnpbios_calls_init(union pnp_bios_install_struct *header)
48584+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
48585 {
48586 int i;
48587
48588@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
48589 pnp_bios_callpoint.offset = header->fields.pm16offset;
48590 pnp_bios_callpoint.segment = PNP_CS16;
48591
48592+ pax_open_kernel();
48593+
48594 for_each_possible_cpu(i) {
48595 struct desc_struct *gdt = get_cpu_gdt_table(i);
48596 if (!gdt)
48597@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
48598 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
48599 (unsigned long)__va(header->fields.pm16dseg));
48600 }
48601+
48602+ pax_close_kernel();
48603 }
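Three pax_open_kernel()/pax_close_kernel() pairs are added here because the GDT is write-protected under PaX's KERNEXEC: legitimate one-off updates (swapping a descriptor in and out around the BIOS call, plus the __init-time setup) must briefly lift that protection. The bad_bios_desc tweak from 0x4092 to 0x4093 pre-sets the descriptor's accessed bit, presumably so the CPU never needs to write it now that the descriptor is const. A userspace analogy for the open/close pattern (mprotect stands in for the kernel mechanism; this is not the actual implementation):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "initial");
            mprotect(p, pagesz, PROT_READ);              /* normally read-only */

            mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() analogue */
            strcpy(p, "patched");                        /* the one sanctioned write */
            mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() analogue */

            printf("%s\n", p);
            return 0;
    }
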
48604diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
48605index bacddd1..65ea100 100644
48606--- a/drivers/pnp/resource.c
48607+++ b/drivers/pnp/resource.c
48608@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
48609 return 1;
48610
48611 /* check if the resource is valid */
48612- if (*irq < 0 || *irq > 15)
48613+ if (*irq > 15)
48614 return 0;
48615
48616 /* check if the resource is reserved */
48617@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
48618 return 1;
48619
48620 /* check if the resource is valid */
48621- if (*dma < 0 || *dma == 4 || *dma > 7)
48622+ if (*dma == 4 || *dma > 7)
48623 return 0;
48624
48625 /* check if the resource is reserved */
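Both resource.c hunks drop the `*irq < 0` / `*dma < 0` halves of the range checks: the values are unsigned, so those comparisons are always false and compilers flag them (-Wtype-limits). Only the upper-bound test carries any meaning:

    #include <stdio.h>

    int main(void)
    {
            unsigned long irq = 3;
            /* With irq unsigned, (irq < 0) can never be true, so the
             * patch keeps only the meaningful bound. */
            if (irq > 15)
                    puts("invalid IRQ");
            else
                    puts("IRQ in range");
            return 0;
    }
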
48626diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
48627index 0c52e2a..3421ab7 100644
48628--- a/drivers/power/pda_power.c
48629+++ b/drivers/power/pda_power.c
48630@@ -37,7 +37,11 @@ static int polling;
48631
48632 #if IS_ENABLED(CONFIG_USB_PHY)
48633 static struct usb_phy *transceiver;
48634-static struct notifier_block otg_nb;
48635+static int otg_handle_notification(struct notifier_block *nb,
48636+ unsigned long event, void *unused);
48637+static struct notifier_block otg_nb = {
48638+ .notifier_call = otg_handle_notification
48639+};
48640 #endif
48641
48642 static struct regulator *ac_draw;
48643@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
48644
48645 #if IS_ENABLED(CONFIG_USB_PHY)
48646 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
48647- otg_nb.notifier_call = otg_handle_notification;
48648 ret = usb_register_notifier(transceiver, &otg_nb);
48649 if (ret) {
48650 dev_err(dev, "failure to register otg notifier\n");
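The pda_power hunk replaces the runtime `otg_nb.notifier_call = ...` assignment with a static initializer plus a forward declaration of the handler. Once nothing writes the structure after compile time, grsecurity's constify plugin can move it to read-only memory. The idiom, sketched with invented names:

    #include <stdio.h>

    struct notifier { int (*call)(int); };

    static int handle(int ev);                      /* forward declaration */
    static struct notifier nb = { .call = handle }; /* no runtime write needed */

    static int handle(int ev) { return ev + 1; }

    int main(void)
    {
            printf("%d\n", nb.call(41));
            return 0;
    }
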
48651diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
48652index cc439fd..8fa30df 100644
48653--- a/drivers/power/power_supply.h
48654+++ b/drivers/power/power_supply.h
48655@@ -16,12 +16,12 @@ struct power_supply;
48656
48657 #ifdef CONFIG_SYSFS
48658
48659-extern void power_supply_init_attrs(struct device_type *dev_type);
48660+extern void power_supply_init_attrs(void);
48661 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
48662
48663 #else
48664
48665-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
48666+static inline void power_supply_init_attrs(void) {}
48667 #define power_supply_uevent NULL
48668
48669 #endif /* CONFIG_SYSFS */
48670diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
48671index 2660664..75fcb04 100644
48672--- a/drivers/power/power_supply_core.c
48673+++ b/drivers/power/power_supply_core.c
48674@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
48675 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
48676 EXPORT_SYMBOL_GPL(power_supply_notifier);
48677
48678-static struct device_type power_supply_dev_type;
48679+extern const struct attribute_group *power_supply_attr_groups[];
48680+static struct device_type power_supply_dev_type = {
48681+ .groups = power_supply_attr_groups,
48682+};
48683
48684 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
48685 struct power_supply *supply)
48686@@ -628,7 +631,7 @@ static int __init power_supply_class_init(void)
48687 return PTR_ERR(power_supply_class);
48688
48689 power_supply_class->dev_uevent = power_supply_uevent;
48690- power_supply_init_attrs(&power_supply_dev_type);
48691+ power_supply_init_attrs();
48692
48693 return 0;
48694 }
48695diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
48696index 44420d1..967126e 100644
48697--- a/drivers/power/power_supply_sysfs.c
48698+++ b/drivers/power/power_supply_sysfs.c
48699@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
48700 .is_visible = power_supply_attr_is_visible,
48701 };
48702
48703-static const struct attribute_group *power_supply_attr_groups[] = {
48704+const struct attribute_group *power_supply_attr_groups[] = {
48705 &power_supply_attr_group,
48706 NULL,
48707 };
48708
48709-void power_supply_init_attrs(struct device_type *dev_type)
48710+void power_supply_init_attrs(void)
48711 {
48712 int i;
48713
48714- dev_type->groups = power_supply_attr_groups;
48715-
48716 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
48717 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
48718 }
48719diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
48720index 84419af..268ede8 100644
48721--- a/drivers/powercap/powercap_sys.c
48722+++ b/drivers/powercap/powercap_sys.c
48723@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
48724 struct device_attribute name_attr;
48725 };
48726
48727+static ssize_t show_constraint_name(struct device *dev,
48728+ struct device_attribute *dev_attr,
48729+ char *buf);
48730+
48731 static struct powercap_constraint_attr
48732- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
48733+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
48734+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
48735+ .power_limit_attr = {
48736+ .attr = {
48737+ .name = NULL,
48738+ .mode = S_IWUSR | S_IRUGO
48739+ },
48740+ .show = show_constraint_power_limit_uw,
48741+ .store = store_constraint_power_limit_uw
48742+ },
48743+
48744+ .time_window_attr = {
48745+ .attr = {
48746+ .name = NULL,
48747+ .mode = S_IWUSR | S_IRUGO
48748+ },
48749+ .show = show_constraint_time_window_us,
48750+ .store = store_constraint_time_window_us
48751+ },
48752+
48753+ .max_power_attr = {
48754+ .attr = {
48755+ .name = NULL,
48756+ .mode = S_IRUGO
48757+ },
48758+ .show = show_constraint_max_power_uw,
48759+ .store = NULL
48760+ },
48761+
48762+ .min_power_attr = {
48763+ .attr = {
48764+ .name = NULL,
48765+ .mode = S_IRUGO
48766+ },
48767+ .show = show_constraint_min_power_uw,
48768+ .store = NULL
48769+ },
48770+
48771+ .max_time_window_attr = {
48772+ .attr = {
48773+ .name = NULL,
48774+ .mode = S_IRUGO
48775+ },
48776+ .show = show_constraint_max_time_window_us,
48777+ .store = NULL
48778+ },
48779+
48780+ .min_time_window_attr = {
48781+ .attr = {
48782+ .name = NULL,
48783+ .mode = S_IRUGO
48784+ },
48785+ .show = show_constraint_min_time_window_us,
48786+ .store = NULL
48787+ },
48788+
48789+ .name_attr = {
48790+ .attr = {
48791+ .name = NULL,
48792+ .mode = S_IRUGO
48793+ },
48794+ .show = show_constraint_name,
48795+ .store = NULL
48796+ }
48797+ }
48798+};
48799
48800 /* A list of powercap control_types */
48801 static LIST_HEAD(powercap_cntrl_list);
48802@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
48803 }
48804
48805 static int create_constraint_attribute(int id, const char *name,
48806- int mode,
48807- struct device_attribute *dev_attr,
48808- ssize_t (*show)(struct device *,
48809- struct device_attribute *, char *),
48810- ssize_t (*store)(struct device *,
48811- struct device_attribute *,
48812- const char *, size_t)
48813- )
48814+ struct device_attribute *dev_attr)
48815 {
48816+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
48817
48818- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
48819- id, name);
48820- if (!dev_attr->attr.name)
48821+ if (!name)
48822 return -ENOMEM;
48823- dev_attr->attr.mode = mode;
48824- dev_attr->show = show;
48825- dev_attr->store = store;
48826+
48827+ pax_open_kernel();
48828+ *(const char **)&dev_attr->attr.name = name;
48829+ pax_close_kernel();
48830
48831 return 0;
48832 }
48833@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
48834
48835 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
48836 ret = create_constraint_attribute(i, "power_limit_uw",
48837- S_IWUSR | S_IRUGO,
48838- &constraint_attrs[i].power_limit_attr,
48839- show_constraint_power_limit_uw,
48840- store_constraint_power_limit_uw);
48841+ &constraint_attrs[i].power_limit_attr);
48842 if (ret)
48843 goto err_alloc;
48844 ret = create_constraint_attribute(i, "time_window_us",
48845- S_IWUSR | S_IRUGO,
48846- &constraint_attrs[i].time_window_attr,
48847- show_constraint_time_window_us,
48848- store_constraint_time_window_us);
48849+ &constraint_attrs[i].time_window_attr);
48850 if (ret)
48851 goto err_alloc;
48852- ret = create_constraint_attribute(i, "name", S_IRUGO,
48853- &constraint_attrs[i].name_attr,
48854- show_constraint_name,
48855- NULL);
48856+ ret = create_constraint_attribute(i, "name",
48857+ &constraint_attrs[i].name_attr);
48858 if (ret)
48859 goto err_alloc;
48860- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
48861- &constraint_attrs[i].max_power_attr,
48862- show_constraint_max_power_uw,
48863- NULL);
48864+ ret = create_constraint_attribute(i, "max_power_uw",
48865+ &constraint_attrs[i].max_power_attr);
48866 if (ret)
48867 goto err_alloc;
48868- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
48869- &constraint_attrs[i].min_power_attr,
48870- show_constraint_min_power_uw,
48871- NULL);
48872+ ret = create_constraint_attribute(i, "min_power_uw",
48873+ &constraint_attrs[i].min_power_attr);
48874 if (ret)
48875 goto err_alloc;
48876 ret = create_constraint_attribute(i, "max_time_window_us",
48877- S_IRUGO,
48878- &constraint_attrs[i].max_time_window_attr,
48879- show_constraint_max_time_window_us,
48880- NULL);
48881+ &constraint_attrs[i].max_time_window_attr);
48882 if (ret)
48883 goto err_alloc;
48884 ret = create_constraint_attribute(i, "min_time_window_us",
48885- S_IRUGO,
48886- &constraint_attrs[i].min_time_window_attr,
48887- show_constraint_min_time_window_us,
48888- NULL);
48889+ &constraint_attrs[i].min_time_window_attr);
48890 if (ret)
48891 goto err_alloc;
48892
48893@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
48894 power_zone->zone_dev_attrs[count++] =
48895 &dev_attr_max_energy_range_uj.attr;
48896 if (power_zone->ops->get_energy_uj) {
48897+ pax_open_kernel();
48898 if (power_zone->ops->reset_energy_uj)
48899- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48900+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48901 else
48902- dev_attr_energy_uj.attr.mode = S_IRUGO;
48903+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
48904+ pax_close_kernel();
48905 power_zone->zone_dev_attrs[count++] =
48906 &dev_attr_energy_uj.attr;
48907 }
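Here constraint_attrs is fully populated at compile time with a GNU range designator, leaving create_constraint_attribute() with a single job: installing the kasprintf()-formatted name, through a *(const char **)& cast inside pax_open_kernel()/pax_close_kernel(), since the attribute is otherwise treated as read-only. The energy_uj mode toggle at the end of the file gets the same cast treatment. The range-designator form, sketched in userspace (a GNU C extension):

    #include <stdio.h>

    struct attr { const char *name; unsigned int mode; };

    #define NR 4
    /* Every element gets the same compile-time template, as with
     * constraint_attrs above; only .name is filled in later. */
    static struct attr attrs[NR] = {
            [0 ... NR - 1] = { .name = NULL, .mode = 0444 },
    };

    int main(void)
    {
            for (int i = 0; i < NR; i++)
                    printf("attr %d mode %o\n", i, attrs[i].mode);
            return 0;
    }
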
48908diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
48909index afca1bc..86840b8 100644
48910--- a/drivers/regulator/core.c
48911+++ b/drivers/regulator/core.c
48912@@ -3366,7 +3366,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48913 {
48914 const struct regulation_constraints *constraints = NULL;
48915 const struct regulator_init_data *init_data;
48916- static atomic_t regulator_no = ATOMIC_INIT(0);
48917+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
48918 struct regulator_dev *rdev;
48919 struct device *dev;
48920 int ret, i;
48921@@ -3436,7 +3436,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48922 rdev->dev.of_node = config->of_node;
48923 rdev->dev.parent = dev;
48924 dev_set_name(&rdev->dev, "regulator.%d",
48925- atomic_inc_return(&regulator_no) - 1);
48926+ atomic_inc_return_unchecked(&regulator_no) - 1);
48927 ret = device_register(&rdev->dev);
48928 if (ret != 0) {
48929 put_device(&rdev->dev);
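regulator_no is a pure ID generator, so wraparound would be harmless; under PaX's REFCOUNT hardening, plain atomic_t overflow is trapped as a refcount bug, and counters like this are opted out by converting them to atomic_unchecked_t with the matching *_unchecked accessors. The same conversion recurs throughout the SCSI hunks below (fcoe, hosts, libfc, lpfc, pmcraid, qla4xxx, scsi core). A userspace analogy using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Analogue of `static atomic_unchecked_t regulator_no`: wrapping this
     * counter is acceptable, unlike a reference count. */
    static atomic_uint next_id;

    static unsigned int new_id(void)
    {
            return atomic_fetch_add(&next_id, 1);
    }

    int main(void)
    {
            printf("regulator.%u\n", new_id());
            printf("regulator.%u\n", new_id());
            return 0;
    }
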
48930diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
48931index 8d94d3d..653b623 100644
48932--- a/drivers/regulator/max8660.c
48933+++ b/drivers/regulator/max8660.c
48934@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
48935 max8660->shadow_regs[MAX8660_OVER1] = 5;
48936 } else {
48937 /* Otherwise devices can be toggled via software */
48938- max8660_dcdc_ops.enable = max8660_dcdc_enable;
48939- max8660_dcdc_ops.disable = max8660_dcdc_disable;
48940+ pax_open_kernel();
48941+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
48942+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
48943+ pax_close_kernel();
48944 }
48945
48946 /*
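This max8660 hunk (and the max8973, mc13892, lpfc_init and qla_os hunks later on) patches function pointers in an ops table that the constify plugin makes const, so the store goes through a *(void **)& cast and is bracketed by pax_open_kernel()/pax_close_kernel(). A well-defined userspace sketch of the cast idiom; the table is deliberately left non-const here, since writing a genuinely const object would be undefined behaviour:

    #include <stdio.h>

    struct regulator_ops_sketch { int (*enable)(void); };

    static int dcdc_enable(void) { puts("enabled"); return 0; }

    /* In a grsecurity build the constify plugin would make this const;
     * kept writable so the userspace sketch stays well-defined. */
    static struct regulator_ops_sketch dcdc_ops = { .enable = NULL };

    int main(void)
    {
            /* The hunk's idiom: write the slot through a cast (the kernel
             * additionally wraps this in pax_open/close_kernel()). */
            *(void **)&dcdc_ops.enable = (void *)dcdc_enable;
            return dcdc_ops.enable();
    }
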
48947diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
48948index 892aa1e..ebd1b9c 100644
48949--- a/drivers/regulator/max8973-regulator.c
48950+++ b/drivers/regulator/max8973-regulator.c
48951@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
48952 if (!pdata || !pdata->enable_ext_control) {
48953 max->desc.enable_reg = MAX8973_VOUT;
48954 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
48955- max->ops.enable = regulator_enable_regmap;
48956- max->ops.disable = regulator_disable_regmap;
48957- max->ops.is_enabled = regulator_is_enabled_regmap;
48958+ pax_open_kernel();
48959+ *(void **)&max->ops.enable = regulator_enable_regmap;
48960+ *(void **)&max->ops.disable = regulator_disable_regmap;
48961+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
48962+ pax_close_kernel();
48963 }
48964
48965 if (pdata) {
48966diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
48967index f374fa5..26f0683 100644
48968--- a/drivers/regulator/mc13892-regulator.c
48969+++ b/drivers/regulator/mc13892-regulator.c
48970@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
48971 }
48972 mc13xxx_unlock(mc13892);
48973
48974- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48975+ pax_open_kernel();
48976+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48977 = mc13892_vcam_set_mode;
48978- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48979+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48980 = mc13892_vcam_get_mode;
48981+ pax_close_kernel();
48982
48983 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
48984 ARRAY_SIZE(mc13892_regulators));
48985diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
48986index cae212f..58a3980 100644
48987--- a/drivers/rtc/rtc-cmos.c
48988+++ b/drivers/rtc/rtc-cmos.c
48989@@ -777,7 +777,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
48990 hpet_rtc_timer_init();
48991
48992 /* export at least the first block of NVRAM */
48993- nvram.size = address_space - NVRAM_OFFSET;
48994+ pax_open_kernel();
48995+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
48996+ pax_close_kernel();
48997 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
48998 if (retval < 0) {
48999 dev_dbg(dev, "can't create nvram file? %d\n", retval);
49000diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
49001index d049393..bb20be0 100644
49002--- a/drivers/rtc/rtc-dev.c
49003+++ b/drivers/rtc/rtc-dev.c
49004@@ -16,6 +16,7 @@
49005 #include <linux/module.h>
49006 #include <linux/rtc.h>
49007 #include <linux/sched.h>
49008+#include <linux/grsecurity.h>
49009 #include "rtc-core.h"
49010
49011 static dev_t rtc_devt;
49012@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
49013 if (copy_from_user(&tm, uarg, sizeof(tm)))
49014 return -EFAULT;
49015
49016+ gr_log_timechange();
49017+
49018 return rtc_set_time(rtc, &tm);
49019
49020 case RTC_PIE_ON:
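rtc-dev.c gains a gr_log_timechange() call, declared via the new linux/grsecurity.h include, so RTC_SET_TIME ioctls are audited before the clock is actually changed. The shape of the hook, sketched with invented userspace names:

    #include <stdio.h>

    /* Hypothetical stand-in for grsecurity's audit hook: log first, then
     * perform the privileged action. */
    static void audit_timechange(void)
    {
            fprintf(stderr, "audit: time set\n");
    }

    static int set_time(long secs)
    {
            audit_timechange();     /* mirrors the gr_log_timechange() placement */
            printf("time set to %ld\n", secs);
            return 0;
    }

    int main(void)
    {
            return set_time(1700000000L);
    }
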
49021diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
49022index 4e75345..09f8663 100644
49023--- a/drivers/rtc/rtc-ds1307.c
49024+++ b/drivers/rtc/rtc-ds1307.c
49025@@ -107,7 +107,7 @@ struct ds1307 {
49026 u8 offset; /* register's offset */
49027 u8 regs[11];
49028 u16 nvram_offset;
49029- struct bin_attribute *nvram;
49030+ bin_attribute_no_const *nvram;
49031 enum ds_type type;
49032 unsigned long flags;
49033 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
49034diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
49035index 11880c1..b823aa4 100644
49036--- a/drivers/rtc/rtc-m48t59.c
49037+++ b/drivers/rtc/rtc-m48t59.c
49038@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
49039 if (IS_ERR(m48t59->rtc))
49040 return PTR_ERR(m48t59->rtc);
49041
49042- m48t59_nvram_attr.size = pdata->offset;
49043+ pax_open_kernel();
49044+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
49045+ pax_close_kernel();
49046
49047 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
49048 if (ret)
49049diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
49050index 14b5f8d..cc9bd26 100644
49051--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
49052+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
49053@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
49054 for (bit = 0; bit < 8; bit++) {
49055
49056 if ((pci_status[i] & (0x1 << bit)) != 0) {
49057- static const char *s;
49058+ const char *s;
49059
49060 s = pci_status_strings[bit];
49061 if (i == 7/*TARG*/ && bit == 3)
49062@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
49063
49064 for (bit = 0; bit < 8; bit++) {
49065
49066- if ((split_status[i] & (0x1 << bit)) != 0) {
49067- static const char *s;
49068-
49069- s = split_status_strings[bit];
49070- printk(s, ahd_name(ahd),
49071+ if ((split_status[i] & (0x1 << bit)) != 0)
49072+ printk(split_status_strings[bit], ahd_name(ahd),
49073 split_status_source[i]);
49074- }
49075
49076 if (i > 1)
49077 continue;
49078
49079- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
49080- static const char *s;
49081-
49082- s = split_status_strings[bit];
49083- printk(s, ahd_name(ahd), "SG");
49084- }
49085+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
49086+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
49087 }
49088 }
49089 /*
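The aic79xx hunks drop `static` from a local `const char *s` (and in the split-interrupt paths remove the temporary entirely): a static local here just creates one persistent, shared pointer cell for all calls, with no benefit and a needless data-section slot. Sketch:

    #include <stdio.h>

    static const char *const msgs[] = { "ok", "parity", "timeout" };

    static void report(int bit)
    {
            /* A plain automatic pointer suffices; `static` would make it
             * a single cell shared across every call for no reason. */
            const char *s = msgs[bit];
            printf("%s\n", s);
    }

    int main(void)
    {
            report(1);
            return 0;
    }
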
49090diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
49091index e693af6..2e525b6 100644
49092--- a/drivers/scsi/bfa/bfa_fcpim.h
49093+++ b/drivers/scsi/bfa/bfa_fcpim.h
49094@@ -36,7 +36,7 @@ struct bfa_iotag_s {
49095
49096 struct bfa_itn_s {
49097 bfa_isr_func_t isr;
49098-};
49099+} __no_const;
49100
49101 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
49102 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
49103diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
49104index a3ab5cc..8143622 100644
49105--- a/drivers/scsi/bfa/bfa_fcs.c
49106+++ b/drivers/scsi/bfa/bfa_fcs.c
49107@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
49108 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
49109
49110 static struct bfa_fcs_mod_s fcs_modules[] = {
49111- { bfa_fcs_port_attach, NULL, NULL },
49112- { bfa_fcs_uf_attach, NULL, NULL },
49113- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
49114- bfa_fcs_fabric_modexit },
49115+ {
49116+ .attach = bfa_fcs_port_attach,
49117+ .modinit = NULL,
49118+ .modexit = NULL
49119+ },
49120+ {
49121+ .attach = bfa_fcs_uf_attach,
49122+ .modinit = NULL,
49123+ .modexit = NULL
49124+ },
49125+ {
49126+ .attach = bfa_fcs_fabric_attach,
49127+ .modinit = bfa_fcs_fabric_modinit,
49128+ .modexit = bfa_fcs_fabric_modexit
49129+ },
49130 };
49131
49132 /*
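fcs_modules here, __port_action just below, and the hal_mod_* macro in bfa_modules.h are all rewritten from positional to designated initializers. The likely motive is grsecurity's structure plugins: designated initializers stay correct if randomize_layout reorders fields, and they keep such ops tables eligible for constification. The pattern, sketched with invented names:

    #include <stdio.h>

    struct mod_ops {
            void (*attach)(void);
            void (*modinit)(void);
            void (*modexit)(void);
    };

    static void port_attach(void) { puts("attach"); }

    /* Designated form: still correct even if a plugin reorders fields. */
    static const struct mod_ops port_mod = {
            .attach  = port_attach,
            .modinit = NULL,
            .modexit = NULL,
    };

    int main(void)
    {
            if (port_mod.attach)
                    port_mod.attach();
            return 0;
    }
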
49133diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
49134index ff75ef8..2dfe00a 100644
49135--- a/drivers/scsi/bfa/bfa_fcs_lport.c
49136+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
49137@@ -89,15 +89,26 @@ static struct {
49138 void (*offline) (struct bfa_fcs_lport_s *port);
49139 } __port_action[] = {
49140 {
49141- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
49142- bfa_fcs_lport_unknown_offline}, {
49143- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
49144- bfa_fcs_lport_fab_offline}, {
49145- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
49146- bfa_fcs_lport_n2n_offline}, {
49147- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
49148- bfa_fcs_lport_loop_offline},
49149- };
49150+ .init = bfa_fcs_lport_unknown_init,
49151+ .online = bfa_fcs_lport_unknown_online,
49152+ .offline = bfa_fcs_lport_unknown_offline
49153+ },
49154+ {
49155+ .init = bfa_fcs_lport_fab_init,
49156+ .online = bfa_fcs_lport_fab_online,
49157+ .offline = bfa_fcs_lport_fab_offline
49158+ },
49159+ {
49160+ .init = bfa_fcs_lport_n2n_init,
49161+ .online = bfa_fcs_lport_n2n_online,
49162+ .offline = bfa_fcs_lport_n2n_offline
49163+ },
49164+ {
49165+ .init = bfa_fcs_lport_loop_init,
49166+ .online = bfa_fcs_lport_loop_online,
49167+ .offline = bfa_fcs_lport_loop_offline
49168+ },
49169+};
49170
49171 /*
49172 * fcs_port_sm FCS logical port state machine
49173diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
49174index 2e28392..9d865b6 100644
49175--- a/drivers/scsi/bfa/bfa_ioc.h
49176+++ b/drivers/scsi/bfa/bfa_ioc.h
49177@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
49178 bfa_ioc_disable_cbfn_t disable_cbfn;
49179 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
49180 bfa_ioc_reset_cbfn_t reset_cbfn;
49181-};
49182+} __no_const;
49183
49184 /*
49185 * IOC event notification mechanism.
49186@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
49187 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
49188 enum bfi_ioc_state fwstate);
49189 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
49190-};
49191+} __no_const;
49192
49193 /*
49194 * Queue element to wait for room in request queue. FIFO order is
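bfa_itn_s above and the two callback structs here are tagged __no_const, grsecurity's opt-out marker telling the constify plugin that these function-pointer structs really are assigned at runtime and must stay writable. A compilable sketch of the marker's shape (it expands to a plugin attribute in grsecurity builds and to nothing elsewhere; the definition below is an assumption for illustration):

    #ifndef __no_const
    #define __no_const /* constify-plugin attribute in grsecurity builds */
    #endif

    struct ioc_cbfn_sketch {
            void (*enable_cbfn)(void *bfa, int status);
            void (*disable_cbfn)(void *bfa);
    } __no_const;

    int main(void)
    {
            return 0;
    }
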
49195diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
49196index a14c784..6de6790 100644
49197--- a/drivers/scsi/bfa/bfa_modules.h
49198+++ b/drivers/scsi/bfa/bfa_modules.h
49199@@ -78,12 +78,12 @@ enum {
49200 \
49201 extern struct bfa_module_s hal_mod_ ## __mod; \
49202 struct bfa_module_s hal_mod_ ## __mod = { \
49203- bfa_ ## __mod ## _meminfo, \
49204- bfa_ ## __mod ## _attach, \
49205- bfa_ ## __mod ## _detach, \
49206- bfa_ ## __mod ## _start, \
49207- bfa_ ## __mod ## _stop, \
49208- bfa_ ## __mod ## _iocdisable, \
49209+ .meminfo = bfa_ ## __mod ## _meminfo, \
49210+ .attach = bfa_ ## __mod ## _attach, \
49211+ .detach = bfa_ ## __mod ## _detach, \
49212+ .start = bfa_ ## __mod ## _start, \
49213+ .stop = bfa_ ## __mod ## _stop, \
49214+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
49215 }
49216
49217 #define BFA_CACHELINE_SZ (256)
49218diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
49219index 045c4e1..13de803 100644
49220--- a/drivers/scsi/fcoe/fcoe_sysfs.c
49221+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
49222@@ -33,8 +33,8 @@
49223 */
49224 #include "libfcoe.h"
49225
49226-static atomic_t ctlr_num;
49227-static atomic_t fcf_num;
49228+static atomic_unchecked_t ctlr_num;
49229+static atomic_unchecked_t fcf_num;
49230
49231 /*
49232 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
49233@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
49234 if (!ctlr)
49235 goto out;
49236
49237- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
49238+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
49239 ctlr->f = f;
49240 ctlr->mode = FIP_CONN_TYPE_FABRIC;
49241 INIT_LIST_HEAD(&ctlr->fcfs);
49242@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
49243 fcf->dev.parent = &ctlr->dev;
49244 fcf->dev.bus = &fcoe_bus_type;
49245 fcf->dev.type = &fcoe_fcf_device_type;
49246- fcf->id = atomic_inc_return(&fcf_num) - 1;
49247+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
49248 fcf->state = FCOE_FCF_STATE_UNKNOWN;
49249
49250 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
49251@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
49252 {
49253 int error;
49254
49255- atomic_set(&ctlr_num, 0);
49256- atomic_set(&fcf_num, 0);
49257+ atomic_set_unchecked(&ctlr_num, 0);
49258+ atomic_set_unchecked(&fcf_num, 0);
49259
49260 error = bus_register(&fcoe_bus_type);
49261 if (error)
49262diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
49263index f28ea07..34b16d3 100644
49264--- a/drivers/scsi/hosts.c
49265+++ b/drivers/scsi/hosts.c
49266@@ -42,7 +42,7 @@
49267 #include "scsi_logging.h"
49268
49269
49270-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
49271+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
49272
49273
49274 static void scsi_host_cls_release(struct device *dev)
49275@@ -369,7 +369,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
49276 * subtract one because we increment first then return, but we need to
49277 * know what the next host number was before increment
49278 */
49279- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
49280+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
49281 shost->dma_channel = 0xff;
49282
49283 /* These three are default values which can be overridden */
49284diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
49285index 868318a..e07ef3b 100644
49286--- a/drivers/scsi/hpsa.c
49287+++ b/drivers/scsi/hpsa.c
49288@@ -571,7 +571,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
49289 unsigned long flags;
49290
49291 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
49292- return h->access.command_completed(h, q);
49293+ return h->access->command_completed(h, q);
49294
49295 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
49296 a = rq->head[rq->current_entry];
49297@@ -3474,7 +3474,7 @@ static void start_io(struct ctlr_info *h)
49298 while (!list_empty(&h->reqQ)) {
49299 c = list_entry(h->reqQ.next, struct CommandList, list);
49300 /* can't do anything if fifo is full */
49301- if ((h->access.fifo_full(h))) {
49302+ if ((h->access->fifo_full(h))) {
49303 h->fifo_recently_full = 1;
49304 dev_warn(&h->pdev->dev, "fifo full\n");
49305 break;
49306@@ -3498,7 +3498,7 @@ static void start_io(struct ctlr_info *h)
49307
49308 /* Tell the controller execute command */
49309 spin_unlock_irqrestore(&h->lock, flags);
49310- h->access.submit_command(h, c);
49311+ h->access->submit_command(h, c);
49312 spin_lock_irqsave(&h->lock, flags);
49313 }
49314 spin_unlock_irqrestore(&h->lock, flags);
49315@@ -3506,17 +3506,17 @@ static void start_io(struct ctlr_info *h)
49316
49317 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
49318 {
49319- return h->access.command_completed(h, q);
49320+ return h->access->command_completed(h, q);
49321 }
49322
49323 static inline bool interrupt_pending(struct ctlr_info *h)
49324 {
49325- return h->access.intr_pending(h);
49326+ return h->access->intr_pending(h);
49327 }
49328
49329 static inline long interrupt_not_for_us(struct ctlr_info *h)
49330 {
49331- return (h->access.intr_pending(h) == 0) ||
49332+ return (h->access->intr_pending(h) == 0) ||
49333 (h->interrupts_enabled == 0);
49334 }
49335
49336@@ -4442,7 +4442,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
49337 if (prod_index < 0)
49338 return -ENODEV;
49339 h->product_name = products[prod_index].product_name;
49340- h->access = *(products[prod_index].access);
49341+ h->access = products[prod_index].access;
49342
49343 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
49344 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
49345@@ -4712,7 +4712,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
49346 {
49347 unsigned long flags;
49348
49349- h->access.set_intr_mask(h, HPSA_INTR_OFF);
49350+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
49351 spin_lock_irqsave(&h->lock, flags);
49352 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
49353 spin_unlock_irqrestore(&h->lock, flags);
49354@@ -4843,7 +4843,7 @@ reinit_after_soft_reset:
49355 }
49356
49357 /* make sure the board interrupts are off */
49358- h->access.set_intr_mask(h, HPSA_INTR_OFF);
49359+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
49360
49361 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
49362 goto clean2;
49363@@ -4877,7 +4877,7 @@ reinit_after_soft_reset:
49364 * fake ones to scoop up any residual completions.
49365 */
49366 spin_lock_irqsave(&h->lock, flags);
49367- h->access.set_intr_mask(h, HPSA_INTR_OFF);
49368+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
49369 spin_unlock_irqrestore(&h->lock, flags);
49370 free_irqs(h);
49371 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
49372@@ -4896,9 +4896,9 @@ reinit_after_soft_reset:
49373 dev_info(&h->pdev->dev, "Board READY.\n");
49374 dev_info(&h->pdev->dev,
49375 "Waiting for stale completions to drain.\n");
49376- h->access.set_intr_mask(h, HPSA_INTR_ON);
49377+ h->access->set_intr_mask(h, HPSA_INTR_ON);
49378 msleep(10000);
49379- h->access.set_intr_mask(h, HPSA_INTR_OFF);
49380+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
49381
49382 rc = controller_reset_failed(h->cfgtable);
49383 if (rc)
49384@@ -4919,7 +4919,7 @@ reinit_after_soft_reset:
49385 }
49386
49387 /* Turn the interrupts on so we can service requests */
49388- h->access.set_intr_mask(h, HPSA_INTR_ON);
49389+ h->access->set_intr_mask(h, HPSA_INTR_ON);
49390
49391 hpsa_hba_inquiry(h);
49392 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
49393@@ -4988,7 +4988,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
49394 * To write all data in the battery backed cache to disks
49395 */
49396 hpsa_flush_cache(h);
49397- h->access.set_intr_mask(h, HPSA_INTR_OFF);
49398+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
49399 hpsa_free_irqs_and_disable_msix(h);
49400 }
49401
49402@@ -5162,7 +5162,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
49403 return;
49404 }
49405 /* Change the access methods to the performant access methods */
49406- h->access = SA5_performant_access;
49407+ h->access = &SA5_performant_access;
49408 h->transMethod = CFGTBL_Trans_Performant;
49409 }
49410
49411diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
49412index 01c3283..4655219 100644
49413--- a/drivers/scsi/hpsa.h
49414+++ b/drivers/scsi/hpsa.h
49415@@ -79,7 +79,7 @@ struct ctlr_info {
49416 unsigned int msix_vector;
49417 unsigned int msi_vector;
49418 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
49419- struct access_method access;
49420+ struct access_method *access;
49421
49422 /* queue and queue Info */
49423 struct list_head reqQ;
49424@@ -388,19 +388,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
49425 }
49426
49427 static struct access_method SA5_access = {
49428- SA5_submit_command,
49429- SA5_intr_mask,
49430- SA5_fifo_full,
49431- SA5_intr_pending,
49432- SA5_completed,
49433+ .submit_command = SA5_submit_command,
49434+ .set_intr_mask = SA5_intr_mask,
49435+ .fifo_full = SA5_fifo_full,
49436+ .intr_pending = SA5_intr_pending,
49437+ .command_completed = SA5_completed,
49438 };
49439
49440 static struct access_method SA5_performant_access = {
49441- SA5_submit_command,
49442- SA5_performant_intr_mask,
49443- SA5_fifo_full,
49444- SA5_performant_intr_pending,
49445- SA5_performant_completed,
49446+ .submit_command = SA5_submit_command,
49447+ .set_intr_mask = SA5_performant_intr_mask,
49448+ .fifo_full = SA5_fifo_full,
49449+ .intr_pending = SA5_performant_intr_pending,
49450+ .command_completed = SA5_performant_completed,
49451 };
49452
49453 struct board_type {
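The hpsa changes convert `struct access_method access` from an embedded, per-controller copy of the ops table into a pointer, updating every h->access.fn() call site to h->access->fn(); the SA5 tables themselves move to designated initializers. With no per-instance copy left to write, the shared tables can be constified. Sketch with invented names:

    #include <stdio.h>

    struct access_method_sketch { void (*submit)(const char *); };

    static void sa5_submit(const char *cmd) { printf("submit %s\n", cmd); }

    static const struct access_method_sketch sa5_access = {
            .submit = sa5_submit,
    };

    struct ctlr {
            /* Pointer to one shared, read-only ops table instead of a
             * writable per-controller copy, as in the hunk above. */
            const struct access_method_sketch *access;
    };

    int main(void)
    {
            struct ctlr h = { .access = &sa5_access };
            h.access->submit("inquiry");
            return 0;
    }
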
49454diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
49455index 1b3a094..068e683 100644
49456--- a/drivers/scsi/libfc/fc_exch.c
49457+++ b/drivers/scsi/libfc/fc_exch.c
49458@@ -101,12 +101,12 @@ struct fc_exch_mgr {
49459 u16 pool_max_index;
49460
49461 struct {
49462- atomic_t no_free_exch;
49463- atomic_t no_free_exch_xid;
49464- atomic_t xid_not_found;
49465- atomic_t xid_busy;
49466- atomic_t seq_not_found;
49467- atomic_t non_bls_resp;
49468+ atomic_unchecked_t no_free_exch;
49469+ atomic_unchecked_t no_free_exch_xid;
49470+ atomic_unchecked_t xid_not_found;
49471+ atomic_unchecked_t xid_busy;
49472+ atomic_unchecked_t seq_not_found;
49473+ atomic_unchecked_t non_bls_resp;
49474 } stats;
49475 };
49476
49477@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
49478 /* allocate memory for exchange */
49479 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
49480 if (!ep) {
49481- atomic_inc(&mp->stats.no_free_exch);
49482+ atomic_inc_unchecked(&mp->stats.no_free_exch);
49483 goto out;
49484 }
49485 memset(ep, 0, sizeof(*ep));
49486@@ -874,7 +874,7 @@ out:
49487 return ep;
49488 err:
49489 spin_unlock_bh(&pool->lock);
49490- atomic_inc(&mp->stats.no_free_exch_xid);
49491+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
49492 mempool_free(ep, mp->ep_pool);
49493 return NULL;
49494 }
49495@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
49496 xid = ntohs(fh->fh_ox_id); /* we originated exch */
49497 ep = fc_exch_find(mp, xid);
49498 if (!ep) {
49499- atomic_inc(&mp->stats.xid_not_found);
49500+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49501 reject = FC_RJT_OX_ID;
49502 goto out;
49503 }
49504@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
49505 ep = fc_exch_find(mp, xid);
49506 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
49507 if (ep) {
49508- atomic_inc(&mp->stats.xid_busy);
49509+ atomic_inc_unchecked(&mp->stats.xid_busy);
49510 reject = FC_RJT_RX_ID;
49511 goto rel;
49512 }
49513@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
49514 }
49515 xid = ep->xid; /* get our XID */
49516 } else if (!ep) {
49517- atomic_inc(&mp->stats.xid_not_found);
49518+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49519 reject = FC_RJT_RX_ID; /* XID not found */
49520 goto out;
49521 }
49522@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
49523 } else {
49524 sp = &ep->seq;
49525 if (sp->id != fh->fh_seq_id) {
49526- atomic_inc(&mp->stats.seq_not_found);
49527+ atomic_inc_unchecked(&mp->stats.seq_not_found);
49528 if (f_ctl & FC_FC_END_SEQ) {
49529 /*
49530 * Update sequence_id based on incoming last
49531@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
49532
49533 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
49534 if (!ep) {
49535- atomic_inc(&mp->stats.xid_not_found);
49536+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49537 goto out;
49538 }
49539 if (ep->esb_stat & ESB_ST_COMPLETE) {
49540- atomic_inc(&mp->stats.xid_not_found);
49541+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49542 goto rel;
49543 }
49544 if (ep->rxid == FC_XID_UNKNOWN)
49545 ep->rxid = ntohs(fh->fh_rx_id);
49546 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
49547- atomic_inc(&mp->stats.xid_not_found);
49548+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49549 goto rel;
49550 }
49551 if (ep->did != ntoh24(fh->fh_s_id) &&
49552 ep->did != FC_FID_FLOGI) {
49553- atomic_inc(&mp->stats.xid_not_found);
49554+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49555 goto rel;
49556 }
49557 sof = fr_sof(fp);
49558@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
49559 sp->ssb_stat |= SSB_ST_RESP;
49560 sp->id = fh->fh_seq_id;
49561 } else if (sp->id != fh->fh_seq_id) {
49562- atomic_inc(&mp->stats.seq_not_found);
49563+ atomic_inc_unchecked(&mp->stats.seq_not_found);
49564 goto rel;
49565 }
49566
49567@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
49568 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
49569
49570 if (!sp)
49571- atomic_inc(&mp->stats.xid_not_found);
49572+ atomic_inc_unchecked(&mp->stats.xid_not_found);
49573 else
49574- atomic_inc(&mp->stats.non_bls_resp);
49575+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
49576
49577 fc_frame_free(fp);
49578 }
49579@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
49580
49581 list_for_each_entry(ema, &lport->ema_list, ema_list) {
49582 mp = ema->mp;
49583- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
49584+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
49585 st->fc_no_free_exch_xid +=
49586- atomic_read(&mp->stats.no_free_exch_xid);
49587- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
49588- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
49589- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
49590- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
49591+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
49592+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
49593+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
49594+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
49595+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
49596 }
49597 }
49598 EXPORT_SYMBOL(fc_exch_update_stats);
49599diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
49600index d289583..b745eec 100644
49601--- a/drivers/scsi/libsas/sas_ata.c
49602+++ b/drivers/scsi/libsas/sas_ata.c
49603@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
49604 .postreset = ata_std_postreset,
49605 .error_handler = ata_std_error_handler,
49606 .post_internal_cmd = sas_ata_post_internal,
49607- .qc_defer = ata_std_qc_defer,
49608+ .qc_defer = ata_std_qc_defer,
49609 .qc_prep = ata_noop_qc_prep,
49610 .qc_issue = sas_ata_qc_issue,
49611 .qc_fill_rtf = sas_ata_qc_fill_rtf,
49612diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
49613index 4e1b75c..0bbdfa9 100644
49614--- a/drivers/scsi/lpfc/lpfc.h
49615+++ b/drivers/scsi/lpfc/lpfc.h
49616@@ -432,7 +432,7 @@ struct lpfc_vport {
49617 struct dentry *debug_nodelist;
49618 struct dentry *vport_debugfs_root;
49619 struct lpfc_debugfs_trc *disc_trc;
49620- atomic_t disc_trc_cnt;
49621+ atomic_unchecked_t disc_trc_cnt;
49622 #endif
49623 uint8_t stat_data_enabled;
49624 uint8_t stat_data_blocked;
49625@@ -865,8 +865,8 @@ struct lpfc_hba {
49626 struct timer_list fabric_block_timer;
49627 unsigned long bit_flags;
49628 #define FABRIC_COMANDS_BLOCKED 0
49629- atomic_t num_rsrc_err;
49630- atomic_t num_cmd_success;
49631+ atomic_unchecked_t num_rsrc_err;
49632+ atomic_unchecked_t num_cmd_success;
49633 unsigned long last_rsrc_error_time;
49634 unsigned long last_ramp_down_time;
49635 unsigned long last_ramp_up_time;
49636@@ -902,7 +902,7 @@ struct lpfc_hba {
49637
49638 struct dentry *debug_slow_ring_trc;
49639 struct lpfc_debugfs_trc *slow_ring_trc;
49640- atomic_t slow_ring_trc_cnt;
49641+ atomic_unchecked_t slow_ring_trc_cnt;
49642 /* iDiag debugfs sub-directory */
49643 struct dentry *idiag_root;
49644 struct dentry *idiag_pci_cfg;
49645diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
49646index b800cc9..16b6a91 100644
49647--- a/drivers/scsi/lpfc/lpfc_debugfs.c
49648+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
49649@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
49650
49651 #include <linux/debugfs.h>
49652
49653-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
49654+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
49655 static unsigned long lpfc_debugfs_start_time = 0L;
49656
49657 /* iDiag */
49658@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
49659 lpfc_debugfs_enable = 0;
49660
49661 len = 0;
49662- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
49663+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
49664 (lpfc_debugfs_max_disc_trc - 1);
49665 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
49666 dtp = vport->disc_trc + i;
49667@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
49668 lpfc_debugfs_enable = 0;
49669
49670 len = 0;
49671- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
49672+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
49673 (lpfc_debugfs_max_slow_ring_trc - 1);
49674 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
49675 dtp = phba->slow_ring_trc + i;
49676@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
49677 !vport || !vport->disc_trc)
49678 return;
49679
49680- index = atomic_inc_return(&vport->disc_trc_cnt) &
49681+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
49682 (lpfc_debugfs_max_disc_trc - 1);
49683 dtp = vport->disc_trc + index;
49684 dtp->fmt = fmt;
49685 dtp->data1 = data1;
49686 dtp->data2 = data2;
49687 dtp->data3 = data3;
49688- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
49689+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
49690 dtp->jif = jiffies;
49691 #endif
49692 return;
49693@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
49694 !phba || !phba->slow_ring_trc)
49695 return;
49696
49697- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
49698+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
49699 (lpfc_debugfs_max_slow_ring_trc - 1);
49700 dtp = phba->slow_ring_trc + index;
49701 dtp->fmt = fmt;
49702 dtp->data1 = data1;
49703 dtp->data2 = data2;
49704 dtp->data3 = data3;
49705- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
49706+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
49707 dtp->jif = jiffies;
49708 #endif
49709 return;
49710@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
49711 "slow_ring buffer\n");
49712 goto debug_failed;
49713 }
49714- atomic_set(&phba->slow_ring_trc_cnt, 0);
49715+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
49716 memset(phba->slow_ring_trc, 0,
49717 (sizeof(struct lpfc_debugfs_trc) *
49718 lpfc_debugfs_max_slow_ring_trc));
49719@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
49720 "buffer\n");
49721 goto debug_failed;
49722 }
49723- atomic_set(&vport->disc_trc_cnt, 0);
49724+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
49725
49726 snprintf(name, sizeof(name), "discovery_trace");
49727 vport->debug_disc_trc =
49728diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
49729index 68c94cc..8c27be5 100644
49730--- a/drivers/scsi/lpfc/lpfc_init.c
49731+++ b/drivers/scsi/lpfc/lpfc_init.c
49732@@ -10949,8 +10949,10 @@ lpfc_init(void)
49733 "misc_register returned with status %d", error);
49734
49735 if (lpfc_enable_npiv) {
49736- lpfc_transport_functions.vport_create = lpfc_vport_create;
49737- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49738+ pax_open_kernel();
49739+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
49740+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49741+ pax_close_kernel();
49742 }
49743 lpfc_transport_template =
49744 fc_attach_transport(&lpfc_transport_functions);
49745diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
49746index b2ede05..aaf482ca 100644
49747--- a/drivers/scsi/lpfc/lpfc_scsi.c
49748+++ b/drivers/scsi/lpfc/lpfc_scsi.c
49749@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
49750 uint32_t evt_posted;
49751
49752 spin_lock_irqsave(&phba->hbalock, flags);
49753- atomic_inc(&phba->num_rsrc_err);
49754+ atomic_inc_unchecked(&phba->num_rsrc_err);
49755 phba->last_rsrc_error_time = jiffies;
49756
49757 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
49758@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
49759 unsigned long flags;
49760 struct lpfc_hba *phba = vport->phba;
49761 uint32_t evt_posted;
49762- atomic_inc(&phba->num_cmd_success);
49763+ atomic_inc_unchecked(&phba->num_cmd_success);
49764
49765 if (vport->cfg_lun_queue_depth <= queue_depth)
49766 return;
49767@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49768 unsigned long num_rsrc_err, num_cmd_success;
49769 int i;
49770
49771- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
49772- num_cmd_success = atomic_read(&phba->num_cmd_success);
49773+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
49774+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
49775
49776 /*
49777 * The error and success command counters are global per
49778@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49779 }
49780 }
49781 lpfc_destroy_vport_work_array(phba, vports);
49782- atomic_set(&phba->num_rsrc_err, 0);
49783- atomic_set(&phba->num_cmd_success, 0);
49784+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
49785+ atomic_set_unchecked(&phba->num_cmd_success, 0);
49786 }
49787
49788 /**
49789@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
49790 }
49791 }
49792 lpfc_destroy_vport_work_array(phba, vports);
49793- atomic_set(&phba->num_rsrc_err, 0);
49794- atomic_set(&phba->num_cmd_success, 0);
49795+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
49796+ atomic_set_unchecked(&phba->num_cmd_success, 0);
49797 }
49798
49799 /**
49800diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49801index 6fd7d40..b444223 100644
49802--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49803+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49804@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
49805 {
49806 struct scsi_device *sdev = to_scsi_device(dev);
49807 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49808- static struct _raid_device *raid_device;
49809+ struct _raid_device *raid_device;
49810 unsigned long flags;
49811 Mpi2RaidVolPage0_t vol_pg0;
49812 Mpi2ConfigReply_t mpi_reply;
49813@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
49814 {
49815 struct scsi_device *sdev = to_scsi_device(dev);
49816 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49817- static struct _raid_device *raid_device;
49818+ struct _raid_device *raid_device;
49819 unsigned long flags;
49820 Mpi2RaidVolPage0_t vol_pg0;
49821 Mpi2ConfigReply_t mpi_reply;
49822@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
49823 struct fw_event_work *fw_event)
49824 {
49825 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
49826- static struct _raid_device *raid_device;
49827+ struct _raid_device *raid_device;
49828 unsigned long flags;
49829 u16 handle;
49830
49831@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
49832 u64 sas_address;
49833 struct _sas_device *sas_device;
49834 struct _sas_node *expander_device;
49835- static struct _raid_device *raid_device;
49836+ struct _raid_device *raid_device;
49837 u8 retry_count;
49838 unsigned long flags;
49839
49840diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
49841index be8ce54..94ed33a 100644
49842--- a/drivers/scsi/pmcraid.c
49843+++ b/drivers/scsi/pmcraid.c
49844@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
49845 res->scsi_dev = scsi_dev;
49846 scsi_dev->hostdata = res;
49847 res->change_detected = 0;
49848- atomic_set(&res->read_failures, 0);
49849- atomic_set(&res->write_failures, 0);
49850+ atomic_set_unchecked(&res->read_failures, 0);
49851+ atomic_set_unchecked(&res->write_failures, 0);
49852 rc = 0;
49853 }
49854 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
49855@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
49856
49857 /* If this was a SCSI read/write command keep count of errors */
49858 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
49859- atomic_inc(&res->read_failures);
49860+ atomic_inc_unchecked(&res->read_failures);
49861 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
49862- atomic_inc(&res->write_failures);
49863+ atomic_inc_unchecked(&res->write_failures);
49864
49865 if (!RES_IS_GSCSI(res->cfg_entry) &&
49866 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
49867@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
49868 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49869 * hrrq_id assigned here in queuecommand
49870 */
49871- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49872+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49873 pinstance->num_hrrq;
49874 cmd->cmd_done = pmcraid_io_done;
49875
49876@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
49877 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49878 * hrrq_id assigned here in queuecommand
49879 */
49880- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49881+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49882 pinstance->num_hrrq;
49883
49884 if (request_size) {
49885@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
49886
49887 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
49888 /* add resources only after host is added into system */
49889- if (!atomic_read(&pinstance->expose_resources))
49890+ if (!atomic_read_unchecked(&pinstance->expose_resources))
49891 return;
49892
49893 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
49894@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
49895 init_waitqueue_head(&pinstance->reset_wait_q);
49896
49897 atomic_set(&pinstance->outstanding_cmds, 0);
49898- atomic_set(&pinstance->last_message_id, 0);
49899- atomic_set(&pinstance->expose_resources, 0);
49900+ atomic_set_unchecked(&pinstance->last_message_id, 0);
49901+ atomic_set_unchecked(&pinstance->expose_resources, 0);
49902
49903 INIT_LIST_HEAD(&pinstance->free_res_q);
49904 INIT_LIST_HEAD(&pinstance->used_res_q);
49905@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
49906 /* Schedule worker thread to handle CCN and take care of adding and
49907 * removing devices to OS
49908 */
49909- atomic_set(&pinstance->expose_resources, 1);
49910+ atomic_set_unchecked(&pinstance->expose_resources, 1);
49911 schedule_work(&pinstance->worker_q);
49912 return rc;
49913
49914diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
49915index e1d150f..6c6df44 100644
49916--- a/drivers/scsi/pmcraid.h
49917+++ b/drivers/scsi/pmcraid.h
49918@@ -748,7 +748,7 @@ struct pmcraid_instance {
49919 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
49920
49921 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
49922- atomic_t last_message_id;
49923+ atomic_unchecked_t last_message_id;
49924
49925 /* configuration table */
49926 struct pmcraid_config_table *cfg_table;
49927@@ -777,7 +777,7 @@ struct pmcraid_instance {
49928 atomic_t outstanding_cmds;
49929
49930 /* should add/delete resources to mid-layer now ?*/
49931- atomic_t expose_resources;
49932+ atomic_unchecked_t expose_resources;
49933
49934
49935
49936@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
49937 struct pmcraid_config_table_entry_ext cfg_entry_ext;
49938 };
49939 struct scsi_device *scsi_dev; /* Link scsi_device structure */
49940- atomic_t read_failures; /* count of failed READ commands */
49941- atomic_t write_failures; /* count of failed WRITE commands */
49942+ atomic_unchecked_t read_failures; /* count of failed READ commands */
49943+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
49944
49945 /* To indicate add/delete/modify during CCN */
49946 u8 change_detected;
49947diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
49948index 4a0d7c9..3d658d7 100644
49949--- a/drivers/scsi/qla2xxx/qla_attr.c
49950+++ b/drivers/scsi/qla2xxx/qla_attr.c
49951@@ -2038,7 +2038,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
49952 return 0;
49953 }
49954
49955-struct fc_function_template qla2xxx_transport_functions = {
49956+fc_function_template_no_const qla2xxx_transport_functions = {
49957
49958 .show_host_node_name = 1,
49959 .show_host_port_name = 1,
49960@@ -2086,7 +2086,7 @@ struct fc_function_template qla2xxx_transport_functions = {
49961 .bsg_timeout = qla24xx_bsg_timeout,
49962 };
49963
49964-struct fc_function_template qla2xxx_transport_vport_functions = {
49965+fc_function_template_no_const qla2xxx_transport_vport_functions = {
49966
49967 .show_host_node_name = 1,
49968 .show_host_port_name = 1,
49969diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
49970index 1f42662..bf9836c 100644
49971--- a/drivers/scsi/qla2xxx/qla_gbl.h
49972+++ b/drivers/scsi/qla2xxx/qla_gbl.h
49973@@ -546,8 +546,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
49974 struct device_attribute;
49975 extern struct device_attribute *qla2x00_host_attrs[];
49976 struct fc_function_template;
49977-extern struct fc_function_template qla2xxx_transport_functions;
49978-extern struct fc_function_template qla2xxx_transport_vport_functions;
49979+extern fc_function_template_no_const qla2xxx_transport_functions;
49980+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
49981 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
49982 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
49983 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
49984diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
49985index 83cb612..9b7b08c 100644
49986--- a/drivers/scsi/qla2xxx/qla_os.c
49987+++ b/drivers/scsi/qla2xxx/qla_os.c
49988@@ -1491,8 +1491,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
49989 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
49990 /* Ok, a 64bit DMA mask is applicable. */
49991 ha->flags.enable_64bit_addressing = 1;
49992- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49993- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49994+ pax_open_kernel();
49995+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49996+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49997+ pax_close_kernel();
49998 return;
49999 }
50000 }
50001diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
50002index aa67bb9..06d0e2a 100644
50003--- a/drivers/scsi/qla4xxx/ql4_def.h
50004+++ b/drivers/scsi/qla4xxx/ql4_def.h
50005@@ -303,7 +303,7 @@ struct ddb_entry {
50006 * (4000 only) */
50007 atomic_t relogin_timer; /* Max Time to wait for
50008 * relogin to complete */
50009- atomic_t relogin_retry_count; /* Num of times relogin has been
50010+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
50011 * retried */
50012 uint32_t default_time2wait; /* Default Min time between
50013 * relogins (+aens) */
50014diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
50015index c21adc3..1b4155f 100644
50016--- a/drivers/scsi/qla4xxx/ql4_os.c
50017+++ b/drivers/scsi/qla4xxx/ql4_os.c
50018@@ -4463,12 +4463,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
50019 */
50020 if (!iscsi_is_session_online(cls_sess)) {
50021 /* Reset retry relogin timer */
50022- atomic_inc(&ddb_entry->relogin_retry_count);
50023+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
50024 DEBUG2(ql4_printk(KERN_INFO, ha,
50025 "%s: index[%d] relogin timed out-retrying"
50026 " relogin (%d), retry (%d)\n", __func__,
50027 ddb_entry->fw_ddb_index,
50028- atomic_read(&ddb_entry->relogin_retry_count),
50029+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
50030 ddb_entry->default_time2wait + 4));
50031 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
50032 atomic_set(&ddb_entry->retry_relogin_timer,
50033@@ -6552,7 +6552,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
50034
50035 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
50036 atomic_set(&ddb_entry->relogin_timer, 0);
50037- atomic_set(&ddb_entry->relogin_retry_count, 0);
50038+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
50039 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
50040 ddb_entry->default_relogin_timeout =
50041 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
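
relogin_retry_count is pure bookkeeping, so a wrap is harmless; switching it to atomic_unchecked_t opts it out of the overflow trap that PAX_REFCOUNT adds to the regular atomic_t operations. A simplified sketch of what the unchecked variants amount to (assumption: the real definitions live in the PaX atomic headers and are arch-specific):

    /* Sketch only, using compiler builtins in place of the arch code. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* may wrap */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
    }

The scsi.c/scsi_lib.c iostat counters, fc_event_seq, iscsi_session_nr and the other conversions below follow the same reasoning: sequence numbers and statistics may wrap, reference counts may not.
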
50042diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
50043index d8afec8..3ec7152 100644
50044--- a/drivers/scsi/scsi.c
50045+++ b/drivers/scsi/scsi.c
50046@@ -658,7 +658,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
50047 struct Scsi_Host *host = cmd->device->host;
50048 int rtn = 0;
50049
50050- atomic_inc(&cmd->device->iorequest_cnt);
50051+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
50052
50053 /* check if the device is still usable */
50054 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
50055diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
50056index 62ec84b..93159d8 100644
50057--- a/drivers/scsi/scsi_lib.c
50058+++ b/drivers/scsi/scsi_lib.c
50059@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
50060 shost = sdev->host;
50061 scsi_init_cmd_errh(cmd);
50062 cmd->result = DID_NO_CONNECT << 16;
50063- atomic_inc(&cmd->device->iorequest_cnt);
50064+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
50065
50066 /*
50067 * SCSI request completion path will do scsi_device_unbusy(),
50068@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
50069
50070 INIT_LIST_HEAD(&cmd->eh_entry);
50071
50072- atomic_inc(&cmd->device->iodone_cnt);
50073+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
50074 if (cmd->result)
50075- atomic_inc(&cmd->device->ioerr_cnt);
50076+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
50077
50078 disposition = scsi_decide_disposition(cmd);
50079 if (disposition != SUCCESS &&
50080diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
50081index 665acbf..d18fab4 100644
50082--- a/drivers/scsi/scsi_sysfs.c
50083+++ b/drivers/scsi/scsi_sysfs.c
50084@@ -734,7 +734,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
50085 char *buf) \
50086 { \
50087 struct scsi_device *sdev = to_scsi_device(dev); \
50088- unsigned long long count = atomic_read(&sdev->field); \
50089+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
50090 return snprintf(buf, 20, "0x%llx\n", count); \
50091 } \
50092 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
50093diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
50094index 84a1fdf..693b0d6 100644
50095--- a/drivers/scsi/scsi_tgt_lib.c
50096+++ b/drivers/scsi/scsi_tgt_lib.c
50097@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
50098 int err;
50099
50100 dprintk("%lx %u\n", uaddr, len);
50101- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
50102+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
50103 if (err) {
50104 /*
50105 * TODO: need to fixup sg_tablesize, max_segment_size,
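
The (void __user *) cast is a sparse annotation fix rather than a behavioural change: uaddr holds a user-space address, and tagging it keeps `make C=1` able to flag kernel/user pointer mixups. With a plain compiler the attribute vanishes; from the era's include/linux/compiler.h (trimmed):

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

The sg.c BLKTRACESETUP cast further down is the same kind of fix.
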
50106diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
50107index 4628fd5..a94a1c2 100644
50108--- a/drivers/scsi/scsi_transport_fc.c
50109+++ b/drivers/scsi/scsi_transport_fc.c
50110@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
50111 * Netlink Infrastructure
50112 */
50113
50114-static atomic_t fc_event_seq;
50115+static atomic_unchecked_t fc_event_seq;
50116
50117 /**
50118 * fc_get_event_number - Obtain the next sequential FC event number
50119@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
50120 u32
50121 fc_get_event_number(void)
50122 {
50123- return atomic_add_return(1, &fc_event_seq);
50124+ return atomic_add_return_unchecked(1, &fc_event_seq);
50125 }
50126 EXPORT_SYMBOL(fc_get_event_number);
50127
50128@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
50129 {
50130 int error;
50131
50132- atomic_set(&fc_event_seq, 0);
50133+ atomic_set_unchecked(&fc_event_seq, 0);
50134
50135 error = transport_class_register(&fc_host_class);
50136 if (error)
50137@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
50138 char *cp;
50139
50140 *val = simple_strtoul(buf, &cp, 0);
50141- if ((*cp && (*cp != '\n')) || (*val < 0))
50142+ if (*cp && (*cp != '\n'))
50143 return -EINVAL;
50144 /*
50145 * Check for overflow; dev_loss_tmo is u32
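
The dropped (*val < 0) test could never fire: val points at an unsigned long, so the comparison is constant-false and gcc's -Wtype-limits flags it as dead code. Minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long v = (unsigned long)-1;    /* i.e. ULONG_MAX */
            printf("%d\n", v < 0 ? 1 : 0);          /* always prints 0 */
            return 0;
    }
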
50146diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
50147index fd8ffe6..fd0bebf 100644
50148--- a/drivers/scsi/scsi_transport_iscsi.c
50149+++ b/drivers/scsi/scsi_transport_iscsi.c
50150@@ -79,7 +79,7 @@ struct iscsi_internal {
50151 struct transport_container session_cont;
50152 };
50153
50154-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
50155+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
50156 static struct workqueue_struct *iscsi_eh_timer_workq;
50157
50158 static DEFINE_IDA(iscsi_sess_ida);
50159@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
50160 int err;
50161
50162 ihost = shost->shost_data;
50163- session->sid = atomic_add_return(1, &iscsi_session_nr);
50164+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
50165
50166 if (target_id == ISCSI_MAX_TARGET) {
50167 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
50168@@ -4511,7 +4511,7 @@ static __init int iscsi_transport_init(void)
50169 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
50170 ISCSI_TRANSPORT_VERSION);
50171
50172- atomic_set(&iscsi_session_nr, 0);
50173+ atomic_set_unchecked(&iscsi_session_nr, 0);
50174
50175 err = class_register(&iscsi_transport_class);
50176 if (err)
50177diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
50178index d47ffc8..30f46a9 100644
50179--- a/drivers/scsi/scsi_transport_srp.c
50180+++ b/drivers/scsi/scsi_transport_srp.c
50181@@ -36,7 +36,7 @@
50182 #include "scsi_transport_srp_internal.h"
50183
50184 struct srp_host_attrs {
50185- atomic_t next_port_id;
50186+ atomic_unchecked_t next_port_id;
50187 };
50188 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
50189
50190@@ -101,7 +101,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
50191 struct Scsi_Host *shost = dev_to_shost(dev);
50192 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
50193
50194- atomic_set(&srp_host->next_port_id, 0);
50195+ atomic_set_unchecked(&srp_host->next_port_id, 0);
50196 return 0;
50197 }
50198
50199@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
50200 rport_fast_io_fail_timedout);
50201 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
50202
50203- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
50204+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
50205 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
50206
50207 transport_setup_device(&rport->dev);
50208diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
50209index 36d1a23..3f33303 100644
50210--- a/drivers/scsi/sd.c
50211+++ b/drivers/scsi/sd.c
50212@@ -2962,7 +2962,7 @@ static int sd_probe(struct device *dev)
50213 sdkp->disk = gd;
50214 sdkp->index = index;
50215 atomic_set(&sdkp->openers, 0);
50216- atomic_set(&sdkp->device->ioerr_cnt, 0);
50217+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
50218
50219 if (!sdp->request_queue->rq_timeout) {
50220 if (sdp->type != TYPE_MOD)
50221diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
50222index df5e961..df6b97f 100644
50223--- a/drivers/scsi/sg.c
50224+++ b/drivers/scsi/sg.c
50225@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
50226 sdp->disk->disk_name,
50227 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
50228 NULL,
50229- (char *)arg);
50230+ (char __user *)arg);
50231 case BLKTRACESTART:
50232 return blk_trace_startstop(sdp->device->request_queue, 1);
50233 case BLKTRACESTOP:
50234diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
50235index fbf3b22..f5c8b60 100644
50236--- a/drivers/spi/spi.c
50237+++ b/drivers/spi/spi.c
50238@@ -1980,7 +1980,7 @@ int spi_bus_unlock(struct spi_master *master)
50239 EXPORT_SYMBOL_GPL(spi_bus_unlock);
50240
50241 /* portable code must never pass more than 32 bytes */
50242-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
50243+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
50244
50245 static u8 *buf;
50246
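
32 becomes 32UL so that both arguments of max() have the same type: SMP_CACHE_BYTES is not plain int on every configuration, and the kernel's max() deliberately refuses mixed types. A trimmed sketch of the macro as it looked in this era:

    #define max(x, y) ({                            \
            typeof(x) _max1 = (x);                  \
            typeof(y) _max2 = (y);                  \
            (void) (&_max1 == &_max2);              \
            _max1 > _max2 ? _max1 : _max2; })

    /* The (void)(&_max1 == &_max2) line is the type check: comparing
     * pointers to different types draws gcc's "comparison of distinct
     * pointer types" warning, which max(32, SMP_CACHE_BYTES) triggered
     * wherever SMP_CACHE_BYTES expands to an unsigned long. */
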
50247diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
50248index 2c61783..4d49e4e 100644
50249--- a/drivers/staging/android/timed_output.c
50250+++ b/drivers/staging/android/timed_output.c
50251@@ -25,7 +25,7 @@
50252 #include "timed_output.h"
50253
50254 static struct class *timed_output_class;
50255-static atomic_t device_count;
50256+static atomic_unchecked_t device_count;
50257
50258 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
50259 char *buf)
50260@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
50261 timed_output_class = class_create(THIS_MODULE, "timed_output");
50262 if (IS_ERR(timed_output_class))
50263 return PTR_ERR(timed_output_class);
50264- atomic_set(&device_count, 0);
50265+ atomic_set_unchecked(&device_count, 0);
50266 timed_output_class->dev_groups = timed_output_groups;
50267 }
50268
50269@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
50270 if (ret < 0)
50271 return ret;
50272
50273- tdev->index = atomic_inc_return(&device_count);
50274+ tdev->index = atomic_inc_return_unchecked(&device_count);
50275 tdev->dev = device_create(timed_output_class, NULL,
50276 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
50277 if (IS_ERR(tdev->dev))
50278diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
50279index fe47cd3..19a1bd1 100644
50280--- a/drivers/staging/gdm724x/gdm_tty.c
50281+++ b/drivers/staging/gdm724x/gdm_tty.c
50282@@ -44,7 +44,7 @@
50283 #define gdm_tty_send_control(n, r, v, d, l) (\
50284 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
50285
50286-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
50287+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
50288
50289 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
50290 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
50291diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
50292index 236ed66..dd9cd74 100644
50293--- a/drivers/staging/imx-drm/imx-drm-core.c
50294+++ b/drivers/staging/imx-drm/imx-drm-core.c
50295@@ -488,7 +488,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
50296 goto err_busy;
50297 }
50298
50299- if (imxdrm->drm->open_count) {
50300+ if (local_read(&imxdrm->drm->open_count)) {
50301 ret = -EBUSY;
50302 goto err_busy;
50303 }
50304@@ -576,7 +576,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
50305
50306 mutex_lock(&imxdrm->mutex);
50307
50308- if (imxdrm->drm->open_count) {
50309+ if (local_read(&imxdrm->drm->open_count)) {
50310 ret = -EBUSY;
50311 goto err_busy;
50312 }
50313@@ -715,7 +715,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
50314
50315 mutex_lock(&imxdrm->mutex);
50316
50317- if (imxdrm->drm->open_count) {
50318+ if (local_read(&imxdrm->drm->open_count)) {
50319 ret = -EBUSY;
50320 goto err_busy;
50321 }
50322diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
50323index 3f8020c..649fded 100644
50324--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
50325+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
50326@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
50327 return 0;
50328 }
50329
50330-sfw_test_client_ops_t brw_test_client;
50331-void brw_init_test_client(void)
50332-{
50333- brw_test_client.tso_init = brw_client_init;
50334- brw_test_client.tso_fini = brw_client_fini;
50335- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
50336- brw_test_client.tso_done_rpc = brw_client_done_rpc;
50337+sfw_test_client_ops_t brw_test_client = {
50338+ .tso_init = brw_client_init,
50339+ .tso_fini = brw_client_fini,
50340+ .tso_prep_rpc = brw_client_prep_rpc,
50341+ .tso_done_rpc = brw_client_done_rpc,
50342 };
50343
50344 srpc_service_t brw_test_service;
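
This is the other recurring constification move in the patch: an ops table that an init helper used to fill in at runtime becomes a compile-time designated initializer, so nothing writes to it after build and the constify plugin can treat the type as read-only. The pattern in isolation (hypothetical names; the const is written out explicitly here, while in grsecurity it comes from the plugin):

    struct test_ops {
            int  (*init)(void);
            void (*fini)(void);
    };

    static int  my_init(void) { return 0; }
    static void my_fini(void) { }

    /* before: a boot-time helper did ops.init = my_init; ops.fini = my_fini; */
    static const struct test_ops ops = {
            .init = my_init,
            .fini = my_fini,
    };

The ping_test.c and libcfs module.c hunks below are the same conversion; dropping the now-dead *_init_test_client() helpers in framework.c falls out of it.
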
50345diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
50346index 050723a..fa6fdf1 100644
50347--- a/drivers/staging/lustre/lnet/selftest/framework.c
50348+++ b/drivers/staging/lustre/lnet/selftest/framework.c
50349@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
50350
50351 extern sfw_test_client_ops_t ping_test_client;
50352 extern srpc_service_t ping_test_service;
50353-extern void ping_init_test_client(void);
50354 extern void ping_init_test_service(void);
50355
50356 extern sfw_test_client_ops_t brw_test_client;
50357 extern srpc_service_t brw_test_service;
50358-extern void brw_init_test_client(void);
50359 extern void brw_init_test_service(void);
50360
50361
50362@@ -1684,12 +1682,10 @@ sfw_startup (void)
50363 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
50364 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
50365
50366- brw_init_test_client();
50367 brw_init_test_service();
50368 rc = sfw_register_test(&brw_test_service, &brw_test_client);
50369 LASSERT (rc == 0);
50370
50371- ping_init_test_client();
50372 ping_init_test_service();
50373 rc = sfw_register_test(&ping_test_service, &ping_test_client);
50374 LASSERT (rc == 0);
50375diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
50376index 750cac4..e4d751f 100644
50377--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
50378+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
50379@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
50380 return 0;
50381 }
50382
50383-sfw_test_client_ops_t ping_test_client;
50384-void ping_init_test_client(void)
50385-{
50386- ping_test_client.tso_init = ping_client_init;
50387- ping_test_client.tso_fini = ping_client_fini;
50388- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
50389- ping_test_client.tso_done_rpc = ping_client_done_rpc;
50390-}
50391+sfw_test_client_ops_t ping_test_client = {
50392+ .tso_init = ping_client_init,
50393+ .tso_fini = ping_client_fini,
50394+ .tso_prep_rpc = ping_client_prep_rpc,
50395+ .tso_done_rpc = ping_client_done_rpc,
50396+};
50397
50398 srpc_service_t ping_test_service;
50399 void ping_init_test_service(void)
50400diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
50401index ec4bb5e..740c6dd 100644
50402--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
50403+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
50404@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
50405 ldlm_completion_callback lcs_completion;
50406 ldlm_blocking_callback lcs_blocking;
50407 ldlm_glimpse_callback lcs_glimpse;
50408-};
50409+} __no_const;
50410
50411 /* ldlm_lockd.c */
50412 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
50413diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
50414index c3470ce..2bef527 100644
50415--- a/drivers/staging/lustre/lustre/include/obd.h
50416+++ b/drivers/staging/lustre/lustre/include/obd.h
50417@@ -1426,7 +1426,7 @@ struct md_ops {
50418 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
50419 * wrapper function in include/linux/obd_class.h.
50420 */
50421-};
50422+} __no_const;
50423
50424 struct lsm_operations {
50425 void (*lsm_free)(struct lov_stripe_md *);
50426diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
50427index c9aae13..60ea292 100644
50428--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
50429+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
50430@@ -239,7 +239,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
50431 int added = (mode == LCK_NL);
50432 int overlaps = 0;
50433 int splitted = 0;
50434- const struct ldlm_callback_suite null_cbs = { NULL };
50435+ const struct ldlm_callback_suite null_cbs = { };
50436
50437 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
50438 LPU64" end "LPU64"\n", *flags,
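
Both spellings zero-initialize the whole structure; `{ }` (a GNU C extension the kernel already relies on) merely avoids binding the initializer positionally to whichever member happens to be declared first, which is the safer spelling once grsecurity randomizes structure layout (assumption: that is the motivation here). Quick check:

    #include <assert.h>

    struct suite { void *a; void *b; long c; };

    int main(void)
    {
            struct suite x = { };   /* GNU C: every member zeroed */
            assert(x.a == 0 && x.b == 0 && x.c == 0);
            return 0;
    }
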
50439diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
50440index e947b91..f408990 100644
50441--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
50442+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
50443@@ -217,7 +217,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
50444 int LL_PROC_PROTO(proc_console_max_delay_cs)
50445 {
50446 int rc, max_delay_cs;
50447- ctl_table_t dummy = *table;
50448+ ctl_table_no_const dummy = *table;
50449 cfs_duration_t d;
50450
50451 dummy.data = &max_delay_cs;
50452@@ -248,7 +248,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
50453 int LL_PROC_PROTO(proc_console_min_delay_cs)
50454 {
50455 int rc, min_delay_cs;
50456- ctl_table_t dummy = *table;
50457+ ctl_table_no_const dummy = *table;
50458 cfs_duration_t d;
50459
50460 dummy.data = &min_delay_cs;
50461@@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
50462 int LL_PROC_PROTO(proc_console_backoff)
50463 {
50464 int rc, backoff;
50465- ctl_table_t dummy = *table;
50466+ ctl_table_no_const dummy = *table;
50467
50468 dummy.data = &backoff;
50469 dummy.proc_handler = &proc_dointvec;
50470diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
50471index 24ae26d..9d09cab 100644
50472--- a/drivers/staging/lustre/lustre/libcfs/module.c
50473+++ b/drivers/staging/lustre/lustre/libcfs/module.c
50474@@ -313,11 +313,11 @@ out:
50475
50476
50477 struct cfs_psdev_ops libcfs_psdev_ops = {
50478- libcfs_psdev_open,
50479- libcfs_psdev_release,
50480- NULL,
50481- NULL,
50482- libcfs_ioctl
50483+ .p_open = libcfs_psdev_open,
50484+ .p_close = libcfs_psdev_release,
50485+ .p_read = NULL,
50486+ .p_write = NULL,
50487+ .p_ioctl = libcfs_ioctl
50488 };
50489
50490 extern int insert_proc(void);
50491diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
50492index 52b7731..d604da0 100644
50493--- a/drivers/staging/lustre/lustre/llite/dir.c
50494+++ b/drivers/staging/lustre/lustre/llite/dir.c
50495@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
50496 int mode;
50497 int err;
50498
50499- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
50500+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
50501 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
50502 strlen(filename), mode, LUSTRE_OPC_MKDIR,
50503 lump);
50504diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
50505index 480b7c4..6846324 100644
50506--- a/drivers/staging/media/solo6x10/solo6x10-core.c
50507+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
50508@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
50509
50510 static int solo_sysfs_init(struct solo_dev *solo_dev)
50511 {
50512- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
50513+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
50514 struct device *dev = &solo_dev->dev;
50515 const char *driver;
50516 int i;
50517diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
50518index 1db18c7..35e6afc 100644
50519--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
50520+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
50521@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
50522
50523 int solo_g723_init(struct solo_dev *solo_dev)
50524 {
50525- static struct snd_device_ops ops = { NULL };
50526+ static struct snd_device_ops ops = { };
50527 struct snd_card *card;
50528 struct snd_kcontrol_new kctl;
50529 char name[32];
50530diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
50531index 7f2f247..d999137 100644
50532--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
50533+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
50534@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
50535
50536 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
50537 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
50538- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
50539+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
50540 if (p2m_id < 0)
50541 p2m_id = -p2m_id;
50542 }
50543diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
50544index 8964f8b..36eb087 100644
50545--- a/drivers/staging/media/solo6x10/solo6x10.h
50546+++ b/drivers/staging/media/solo6x10/solo6x10.h
50547@@ -237,7 +237,7 @@ struct solo_dev {
50548
50549 /* P2M DMA Engine */
50550 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
50551- atomic_t p2m_count;
50552+ atomic_unchecked_t p2m_count;
50553 int p2m_jiffies;
50554 unsigned int p2m_timeouts;
50555
50556diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
50557index a0f4868..139f1fb 100644
50558--- a/drivers/staging/octeon/ethernet-rx.c
50559+++ b/drivers/staging/octeon/ethernet-rx.c
50560@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
50561 /* Increment RX stats for virtual ports */
50562 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
50563 #ifdef CONFIG_64BIT
50564- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
50565- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
50566+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
50567+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
50568 #else
50569- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
50570- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
50571+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
50572+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
50573 #endif
50574 }
50575 netif_receive_skb(skb);
50576@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
50577 dev->name);
50578 */
50579 #ifdef CONFIG_64BIT
50580- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
50581+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
50582 #else
50583- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
50584+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
50585 #endif
50586 dev_kfree_skb_irq(skb);
50587 }
50588diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
50589index 089dc4b..c9a687e 100644
50590--- a/drivers/staging/octeon/ethernet.c
50591+++ b/drivers/staging/octeon/ethernet.c
50592@@ -253,11 +253,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
50593 * since the RX tasklet also increments it.
50594 */
50595 #ifdef CONFIG_64BIT
50596- atomic64_add(rx_status.dropped_packets,
50597- (atomic64_t *)&priv->stats.rx_dropped);
50598+ atomic64_add_unchecked(rx_status.dropped_packets,
50599+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
50600 #else
50601- atomic_add(rx_status.dropped_packets,
50602- (atomic_t *)&priv->stats.rx_dropped);
50603+ atomic_add_unchecked(rx_status.dropped_packets,
50604+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
50605 #endif
50606 }
50607
50608diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
50609index c274b34..f84de76 100644
50610--- a/drivers/staging/rtl8188eu/include/hal_intf.h
50611+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
50612@@ -271,7 +271,7 @@ struct hal_ops {
50613 s32 (*c2h_handler)(struct adapter *padapter,
50614 struct c2h_evt_hdr *c2h_evt);
50615 c2h_id_filter c2h_id_filter_ccx;
50616-};
50617+} __no_const;
50618
50619 enum rt_eeprom_type {
50620 EEPROM_93C46,
50621diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
50622index 3d1dfcc..ff5620a 100644
50623--- a/drivers/staging/rtl8188eu/include/rtw_io.h
50624+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
50625@@ -126,7 +126,7 @@ struct _io_ops {
50626 u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
50627 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
50628 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
50629-};
50630+} __no_const;
50631
50632 struct io_req {
50633 struct list_head list;
50634diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
50635index dc23395..cf7e9b1 100644
50636--- a/drivers/staging/rtl8712/rtl871x_io.h
50637+++ b/drivers/staging/rtl8712/rtl871x_io.h
50638@@ -108,7 +108,7 @@ struct _io_ops {
50639 u8 *pmem);
50640 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
50641 u8 *pmem);
50642-};
50643+} __no_const;
50644
50645 struct io_req {
50646 struct list_head list;
50647diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
50648index 1f5088b..0e59820 100644
50649--- a/drivers/staging/sbe-2t3e3/netdev.c
50650+++ b/drivers/staging/sbe-2t3e3/netdev.c
50651@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50652 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
50653
50654 if (rlen)
50655- if (copy_to_user(data, &resp, rlen))
50656+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
50657 return -EFAULT;
50658
50659 return 0;
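
rlen is filled in by t3e3_if_config(), i.e. it ultimately comes back from the hardware path, so it is clamped to sizeof(resp) before being used as a copy_to_user() length; without the bound, a bogus length would leak adjacent kernel stack to user space. The guard in isolation (userspace analogue, hypothetical names):

    #include <string.h>

    struct resp { char data[64]; };

    static int copy_out(char *dst, const struct resp *r, unsigned long rlen)
    {
            if (rlen > sizeof(*r))  /* the bound the patch adds */
                    return -1;      /* the driver returns -EFAULT */
            memcpy(dst, r, rlen);   /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            struct resp r = { { 0 } };
            char buf[sizeof(r)];
            /* exits 0 because the oversize length is rejected */
            return copy_out(buf, &r, sizeof(r) + 1) ? 0 : 1;
    }
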
50660diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
50661index a863a98..d272795 100644
50662--- a/drivers/staging/usbip/vhci.h
50663+++ b/drivers/staging/usbip/vhci.h
50664@@ -83,7 +83,7 @@ struct vhci_hcd {
50665 unsigned resuming:1;
50666 unsigned long re_timeout;
50667
50668- atomic_t seqnum;
50669+ atomic_unchecked_t seqnum;
50670
50671 /*
50672 * NOTE:
50673diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
50674index 72391ef..7c6717a 100644
50675--- a/drivers/staging/usbip/vhci_hcd.c
50676+++ b/drivers/staging/usbip/vhci_hcd.c
50677@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
50678
50679 spin_lock(&vdev->priv_lock);
50680
50681- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
50682+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
50683 if (priv->seqnum == 0xffff)
50684 dev_info(&urb->dev->dev, "seqnum max\n");
50685
50686@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
50687 return -ENOMEM;
50688 }
50689
50690- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
50691+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
50692 if (unlink->seqnum == 0xffff)
50693 pr_info("seqnum max\n");
50694
50695@@ -890,7 +890,7 @@ static int vhci_start(struct usb_hcd *hcd)
50696 vdev->rhport = rhport;
50697 }
50698
50699- atomic_set(&vhci->seqnum, 0);
50700+ atomic_set_unchecked(&vhci->seqnum, 0);
50701 spin_lock_init(&vhci->lock);
50702
50703 hcd->power_budget = 0; /* no limit */
50704diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
50705index d07fcb5..358e1e1 100644
50706--- a/drivers/staging/usbip/vhci_rx.c
50707+++ b/drivers/staging/usbip/vhci_rx.c
50708@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
50709 if (!urb) {
50710 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
50711 pr_info("max seqnum %d\n",
50712- atomic_read(&the_controller->seqnum));
50713+ atomic_read_unchecked(&the_controller->seqnum));
50714 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
50715 return;
50716 }
50717diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
50718index 6eecd53..29317c6 100644
50719--- a/drivers/staging/vt6655/hostap.c
50720+++ b/drivers/staging/vt6655/hostap.c
50721@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
50722 *
50723 */
50724
50725+static net_device_ops_no_const apdev_netdev_ops;
50726+
50727 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50728 {
50729 PSDevice apdev_priv;
50730 struct net_device *dev = pDevice->dev;
50731 int ret;
50732- const struct net_device_ops apdev_netdev_ops = {
50733- .ndo_start_xmit = pDevice->tx_80211,
50734- };
50735
50736 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50737
50738@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50739 *apdev_priv = *pDevice;
50740 eth_hw_addr_inherit(pDevice->apdev, dev);
50741
50742+ /* only half broken now */
50743+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50744 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50745
50746 pDevice->apdev->type = ARPHRD_IEEE80211;
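
Worth noting what the vt6655/vt6656 hostap hunks actually trade: the original code took the address of a stack-local const ops struct and stored it in a long-lived net_device, a pointer that dangled as soon as the function returned. The patch moves the struct to a single file-static (fixing the lifetime) at the cost of sharing one ndo_start_xmit slot across all devices, hence its own "only half broken now" remark. Minimal illustration (hypothetical names):

    struct ops { int (*xmit)(void); };
    struct dev { const struct ops *ops; };

    /* before: the pointer dangles once this frame is popped */
    static void attach_dangling(struct dev *d, int (*fn)(void))
    {
            const struct ops o = { .xmit = fn };
            d->ops = &o;
    }

    static struct ops shared_ops;   /* the patch's replacement */

    static void attach_shared(struct dev *d, int (*fn)(void))
    {
            shared_ops.xmit = fn;   /* last caller wins for every dev */
            d->ops = &shared_ops;
    }
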
50747diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
50748index 67ba48b..24e602f 100644
50749--- a/drivers/staging/vt6656/hostap.c
50750+++ b/drivers/staging/vt6656/hostap.c
50751@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
50752 *
50753 */
50754
50755+static net_device_ops_no_const apdev_netdev_ops;
50756+
50757 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50758 {
50759 struct vnt_private *apdev_priv;
50760 struct net_device *dev = pDevice->dev;
50761 int ret;
50762- const struct net_device_ops apdev_netdev_ops = {
50763- .ndo_start_xmit = pDevice->tx_80211,
50764- };
50765
50766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50767
50768@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50769 *apdev_priv = *pDevice;
50770 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
50771
50772+ /* only half broken now */
50773+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50774 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50775
50776 pDevice->apdev->type = ARPHRD_IEEE80211;
50777diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
50778index 24884ca..26c8220 100644
50779--- a/drivers/target/sbp/sbp_target.c
50780+++ b/drivers/target/sbp/sbp_target.c
50781@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
50782
50783 #define SESSION_MAINTENANCE_INTERVAL HZ
50784
50785-static atomic_t login_id = ATOMIC_INIT(0);
50786+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
50787
50788 static void session_maintenance_work(struct work_struct *);
50789 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
50790@@ -444,7 +444,7 @@ static void sbp_management_request_login(
50791 login->lun = se_lun;
50792 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
50793 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
50794- login->login_id = atomic_inc_return(&login_id);
50795+ login->login_id = atomic_inc_return_unchecked(&login_id);
50796
50797 login->tgt_agt = sbp_target_agent_register(login);
50798 if (IS_ERR(login->tgt_agt)) {
50799diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
50800index 26416c1..e796a3d 100644
50801--- a/drivers/target/target_core_device.c
50802+++ b/drivers/target/target_core_device.c
50803@@ -1524,7 +1524,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
50804 spin_lock_init(&dev->se_tmr_lock);
50805 spin_lock_init(&dev->qf_cmd_lock);
50806 sema_init(&dev->caw_sem, 1);
50807- atomic_set(&dev->dev_ordered_id, 0);
50808+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
50809 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
50810 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
50811 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
50812diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
50813index 98b48d4..f4297e5 100644
50814--- a/drivers/target/target_core_transport.c
50815+++ b/drivers/target/target_core_transport.c
50816@@ -1137,7 +1137,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
50817 * Used to determine when ORDERED commands should go from
50818 * Dormant to Active status.
50819 */
50820- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
50821+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
50822 smp_mb__after_atomic_inc();
50823 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
50824 cmd->se_ordered_id, cmd->sam_task_attr,
50825diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
50826index 04b1be7..5eff86d 100644
50827--- a/drivers/thermal/of-thermal.c
50828+++ b/drivers/thermal/of-thermal.c
50829@@ -30,6 +30,7 @@
50830 #include <linux/err.h>
50831 #include <linux/export.h>
50832 #include <linux/string.h>
50833+#include <linux/mm.h>
50834
50835 #include "thermal_core.h"
50836
50837@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
50838 tz->get_trend = get_trend;
50839 tz->sensor_data = data;
50840
50841- tzd->ops->get_temp = of_thermal_get_temp;
50842- tzd->ops->get_trend = of_thermal_get_trend;
50843+ pax_open_kernel();
50844+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
50845+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
50846+ pax_close_kernel();
50847 mutex_unlock(&tzd->lock);
50848
50849 return tzd;
50850@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
50851 return;
50852
50853 mutex_lock(&tzd->lock);
50854- tzd->ops->get_temp = NULL;
50855- tzd->ops->get_trend = NULL;
50856+ pax_open_kernel();
50857+ *(void **)&tzd->ops->get_temp = NULL;
50858+ *(void **)&tzd->ops->get_trend = NULL;
50859+ pax_close_kernel();
50860
50861 tz->get_temp = NULL;
50862 tz->get_trend = NULL;
50863diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
50864index a57bb5a..1f727d33 100644
50865--- a/drivers/tty/cyclades.c
50866+++ b/drivers/tty/cyclades.c
50867@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
50868 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
50869 info->port.count);
50870 #endif
50871- info->port.count++;
50872+ atomic_inc(&info->port.count);
50873 #ifdef CY_DEBUG_COUNT
50874 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
50875- current->pid, info->port.count);
50876+ current->pid, atomic_read(&info->port.count));
50877 #endif
50878
50879 /*
50880@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
50881 for (j = 0; j < cy_card[i].nports; j++) {
50882 info = &cy_card[i].ports[j];
50883
50884- if (info->port.count) {
50885+ if (atomic_read(&info->port.count)) {
50886 /* XXX is the ldisc num worth this? */
50887 struct tty_struct *tty;
50888 struct tty_ldisc *ld;
50889diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
50890index 0ff7fda..dbc7d52 100644
50891--- a/drivers/tty/hvc/hvc_console.c
50892+++ b/drivers/tty/hvc/hvc_console.c
50893@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
50894
50895 spin_lock_irqsave(&hp->port.lock, flags);
50896 /* Check and then increment for fast path open. */
50897- if (hp->port.count++ > 0) {
50898+ if (atomic_inc_return(&hp->port.count) > 1) {
50899 spin_unlock_irqrestore(&hp->port.lock, flags);
50900 hvc_kick();
50901 return 0;
50902@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50903
50904 spin_lock_irqsave(&hp->port.lock, flags);
50905
50906- if (--hp->port.count == 0) {
50907+ if (atomic_dec_return(&hp->port.count) == 0) {
50908 spin_unlock_irqrestore(&hp->port.lock, flags);
50909 /* We are done with the tty pointer now. */
50910 tty_port_tty_set(&hp->port, NULL);
50911@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50912 */
50913 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
50914 } else {
50915- if (hp->port.count < 0)
50916+ if (atomic_read(&hp->port.count) < 0)
50917 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
50918- hp->vtermno, hp->port.count);
50919+ hp->vtermno, atomic_read(&hp->port.count));
50920 spin_unlock_irqrestore(&hp->port.lock, flags);
50921 }
50922 }
50923@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
50924 * open->hangup case this can be called after the final close so prevent
50925 * that from happening for now.
50926 */
50927- if (hp->port.count <= 0) {
50928+ if (atomic_read(&hp->port.count) <= 0) {
50929 spin_unlock_irqrestore(&hp->port.lock, flags);
50930 return;
50931 }
50932
50933- hp->port.count = 0;
50934+ atomic_set(&hp->port.count, 0);
50935 spin_unlock_irqrestore(&hp->port.lock, flags);
50936 tty_port_tty_set(&hp->port, NULL);
50937
50938@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
50939 return -EPIPE;
50940
50941 /* FIXME what's this (unprotected) check for? */
50942- if (hp->port.count <= 0)
50943+ if (atomic_read(&hp->port.count) <= 0)
50944 return -EIO;
50945
50946 spin_lock_irqsave(&hp->lock, flags);
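
grsecurity converts tty_port.count to atomic_t tree-wide, and the hvc, cyclades, hvcs, hvsi, ipwireless, moxa, n_gsm and rocket hunks around this one all apply the same mechanical mapping. The only subtle case is the post-increment test, where count++ > 0 becomes atomic_inc_return(&count) > 1, since inc_return hands back the new value. The mapping, plus a quick equivalence check:

    #include <assert.h>

    int main(void)
    {
            /* c++        -> atomic_inc(&c)
             * c++ > 0    -> atomic_inc_return(&c) > 1
             * --c == 0   -> atomic_dec_return(&c) == 0
             * c          -> atomic_read(&c)
             * c = 0      -> atomic_set(&c, 0)
             */
            for (int c0 = 0; c0 < 3; c0++) {
                    int a = c0, b = c0;
                    assert((a++ > 0) == (++b > 1)); /* post- vs pre-increment */
            }
            return 0;
    }
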
50947diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
50948index 81e939e..95ead10 100644
50949--- a/drivers/tty/hvc/hvcs.c
50950+++ b/drivers/tty/hvc/hvcs.c
50951@@ -83,6 +83,7 @@
50952 #include <asm/hvcserver.h>
50953 #include <asm/uaccess.h>
50954 #include <asm/vio.h>
50955+#include <asm/local.h>
50956
50957 /*
50958 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
50959@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
50960
50961 spin_lock_irqsave(&hvcsd->lock, flags);
50962
50963- if (hvcsd->port.count > 0) {
50964+ if (atomic_read(&hvcsd->port.count) > 0) {
50965 spin_unlock_irqrestore(&hvcsd->lock, flags);
50966 printk(KERN_INFO "HVCS: vterm state unchanged. "
50967 "The hvcs device node is still in use.\n");
50968@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
50969 }
50970 }
50971
50972- hvcsd->port.count = 0;
50973+ atomic_set(&hvcsd->port.count, 0);
50974 hvcsd->port.tty = tty;
50975 tty->driver_data = hvcsd;
50976
50977@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
50978 unsigned long flags;
50979
50980 spin_lock_irqsave(&hvcsd->lock, flags);
50981- hvcsd->port.count++;
50982+ atomic_inc(&hvcsd->port.count);
50983 hvcsd->todo_mask |= HVCS_SCHED_READ;
50984 spin_unlock_irqrestore(&hvcsd->lock, flags);
50985
50986@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50987 hvcsd = tty->driver_data;
50988
50989 spin_lock_irqsave(&hvcsd->lock, flags);
50990- if (--hvcsd->port.count == 0) {
50991+ if (atomic_dec_and_test(&hvcsd->port.count)) {
50992
50993 vio_disable_interrupts(hvcsd->vdev);
50994
50995@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50996
50997 free_irq(irq, hvcsd);
50998 return;
50999- } else if (hvcsd->port.count < 0) {
51000+ } else if (atomic_read(&hvcsd->port.count) < 0) {
51001 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
51002 " is missmanaged.\n",
51003- hvcsd->vdev->unit_address, hvcsd->port.count);
51004+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
51005 }
51006
51007 spin_unlock_irqrestore(&hvcsd->lock, flags);
51008@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
51009
51010 spin_lock_irqsave(&hvcsd->lock, flags);
51011 /* Preserve this so that we know how many kref refs to put */
51012- temp_open_count = hvcsd->port.count;
51013+ temp_open_count = atomic_read(&hvcsd->port.count);
51014
51015 /*
51016 * Don't kref put inside the spinlock because the destruction
51017@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
51018 tty->driver_data = NULL;
51019 hvcsd->port.tty = NULL;
51020
51021- hvcsd->port.count = 0;
51022+ atomic_set(&hvcsd->port.count, 0);
51023
51024 /* This will drop any buffered data on the floor which is OK in a hangup
51025 * scenario. */
51026@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
51027 * the middle of a write operation? This is a crummy place to do this
51028 * but we want to keep it all in the spinlock.
51029 */
51030- if (hvcsd->port.count <= 0) {
51031+ if (atomic_read(&hvcsd->port.count) <= 0) {
51032 spin_unlock_irqrestore(&hvcsd->lock, flags);
51033 return -ENODEV;
51034 }
51035@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
51036 {
51037 struct hvcs_struct *hvcsd = tty->driver_data;
51038
51039- if (!hvcsd || hvcsd->port.count <= 0)
51040+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
51041 return 0;
51042
51043 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
51044diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
51045index 4190199..06d5bfa 100644
51046--- a/drivers/tty/hvc/hvsi.c
51047+++ b/drivers/tty/hvc/hvsi.c
51048@@ -85,7 +85,7 @@ struct hvsi_struct {
51049 int n_outbuf;
51050 uint32_t vtermno;
51051 uint32_t virq;
51052- atomic_t seqno; /* HVSI packet sequence number */
51053+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
51054 uint16_t mctrl;
51055 uint8_t state; /* HVSI protocol state */
51056 uint8_t flags;
51057@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
51058
51059 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
51060 packet.hdr.len = sizeof(struct hvsi_query_response);
51061- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
51062+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
51063 packet.verb = VSV_SEND_VERSION_NUMBER;
51064 packet.u.version = HVSI_VERSION;
51065 packet.query_seqno = query_seqno+1;
51066@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
51067
51068 packet.hdr.type = VS_QUERY_PACKET_HEADER;
51069 packet.hdr.len = sizeof(struct hvsi_query);
51070- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
51071+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
51072 packet.verb = verb;
51073
51074 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
51075@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
51076 int wrote;
51077
51078 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
51079- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
51080+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
51081 packet.hdr.len = sizeof(struct hvsi_control);
51082 packet.verb = VSV_SET_MODEM_CTL;
51083 packet.mask = HVSI_TSDTR;
51084@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
51085 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
51086
51087 packet.hdr.type = VS_DATA_PACKET_HEADER;
51088- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
51089+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
51090 packet.hdr.len = count + sizeof(struct hvsi_header);
51091 memcpy(&packet.data, buf, count);
51092
51093@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
51094 struct hvsi_control packet __ALIGNED__;
51095
51096 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
51097- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
51098+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
51099 packet.hdr.len = 6;
51100 packet.verb = VSV_CLOSE_PROTOCOL;
51101
51102@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
51103
51104 tty_port_tty_set(&hp->port, tty);
51105 spin_lock_irqsave(&hp->lock, flags);
51106- hp->port.count++;
51107+ atomic_inc(&hp->port.count);
51108 atomic_set(&hp->seqno, 0);
51109 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
51110 spin_unlock_irqrestore(&hp->lock, flags);
51111@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
51112
51113 spin_lock_irqsave(&hp->lock, flags);
51114
51115- if (--hp->port.count == 0) {
51116+ if (atomic_dec_return(&hp->port.count) == 0) {
51117 tty_port_tty_set(&hp->port, NULL);
51118 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
51119
51120@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
51121
51122 spin_lock_irqsave(&hp->lock, flags);
51123 }
51124- } else if (hp->port.count < 0)
51125+ } else if (atomic_read(&hp->port.count) < 0)
51126 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
51127- hp - hvsi_ports, hp->port.count);
51128+ hp - hvsi_ports, atomic_read(&hp->port.count));
51129
51130 spin_unlock_irqrestore(&hp->lock, flags);
51131 }
51132@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
51133 tty_port_tty_set(&hp->port, NULL);
51134
51135 spin_lock_irqsave(&hp->lock, flags);
51136- hp->port.count = 0;
51137+ atomic_set(&hp->port.count, 0);
51138 hp->n_outbuf = 0;
51139 spin_unlock_irqrestore(&hp->lock, flags);
51140 }
51141diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
51142index 7ae6c29..05c6dba 100644
51143--- a/drivers/tty/hvc/hvsi_lib.c
51144+++ b/drivers/tty/hvc/hvsi_lib.c
51145@@ -8,7 +8,7 @@
51146
51147 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
51148 {
51149- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
51150+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
51151
51152 /* Assumes that always succeeds, works in practice */
51153 return pv->put_chars(pv->termno, (char *)packet, packet->len);
51154@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
51155
51156 /* Reset state */
51157 pv->established = 0;
51158- atomic_set(&pv->seqno, 0);
51159+ atomic_set_unchecked(&pv->seqno, 0);
51160
51161 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
51162
51163diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
51164index 17ee3bf..8d2520d 100644
51165--- a/drivers/tty/ipwireless/tty.c
51166+++ b/drivers/tty/ipwireless/tty.c
51167@@ -28,6 +28,7 @@
51168 #include <linux/tty_driver.h>
51169 #include <linux/tty_flip.h>
51170 #include <linux/uaccess.h>
51171+#include <asm/local.h>
51172
51173 #include "tty.h"
51174 #include "network.h"
51175@@ -98,10 +99,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
51176 mutex_unlock(&tty->ipw_tty_mutex);
51177 return -ENODEV;
51178 }
51179- if (tty->port.count == 0)
51180+ if (atomic_read(&tty->port.count) == 0)
51181 tty->tx_bytes_queued = 0;
51182
51183- tty->port.count++;
51184+ atomic_inc(&tty->port.count);
51185
51186 tty->port.tty = linux_tty;
51187 linux_tty->driver_data = tty;
51188@@ -117,9 +118,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
51189
51190 static void do_ipw_close(struct ipw_tty *tty)
51191 {
51192- tty->port.count--;
51193-
51194- if (tty->port.count == 0) {
51195+ if (atomic_dec_return(&tty->port.count) == 0) {
51196 struct tty_struct *linux_tty = tty->port.tty;
51197
51198 if (linux_tty != NULL) {
51199@@ -140,7 +139,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
51200 return;
51201
51202 mutex_lock(&tty->ipw_tty_mutex);
51203- if (tty->port.count == 0) {
51204+ if (atomic_read(&tty->port.count) == 0) {
51205 mutex_unlock(&tty->ipw_tty_mutex);
51206 return;
51207 }
51208@@ -163,7 +162,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
51209
51210 mutex_lock(&tty->ipw_tty_mutex);
51211
51212- if (!tty->port.count) {
51213+ if (!atomic_read(&tty->port.count)) {
51214 mutex_unlock(&tty->ipw_tty_mutex);
51215 return;
51216 }
51217@@ -202,7 +201,7 @@ static int ipw_write(struct tty_struct *linux_tty,
51218 return -ENODEV;
51219
51220 mutex_lock(&tty->ipw_tty_mutex);
51221- if (!tty->port.count) {
51222+ if (!atomic_read(&tty->port.count)) {
51223 mutex_unlock(&tty->ipw_tty_mutex);
51224 return -EINVAL;
51225 }
51226@@ -242,7 +241,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
51227 if (!tty)
51228 return -ENODEV;
51229
51230- if (!tty->port.count)
51231+ if (!atomic_read(&tty->port.count))
51232 return -EINVAL;
51233
51234 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
51235@@ -284,7 +283,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
51236 if (!tty)
51237 return 0;
51238
51239- if (!tty->port.count)
51240+ if (!atomic_read(&tty->port.count))
51241 return 0;
51242
51243 return tty->tx_bytes_queued;
51244@@ -365,7 +364,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
51245 if (!tty)
51246 return -ENODEV;
51247
51248- if (!tty->port.count)
51249+ if (!atomic_read(&tty->port.count))
51250 return -EINVAL;
51251
51252 return get_control_lines(tty);
51253@@ -381,7 +380,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
51254 if (!tty)
51255 return -ENODEV;
51256
51257- if (!tty->port.count)
51258+ if (!atomic_read(&tty->port.count))
51259 return -EINVAL;
51260
51261 return set_control_lines(tty, set, clear);
51262@@ -395,7 +394,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
51263 if (!tty)
51264 return -ENODEV;
51265
51266- if (!tty->port.count)
51267+ if (!atomic_read(&tty->port.count))
51268 return -EINVAL;
51269
51270 /* FIXME: Exactly how is the tty object locked here .. */
51271@@ -551,7 +550,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
51272 * are gone */
51273 mutex_lock(&ttyj->ipw_tty_mutex);
51274 }
51275- while (ttyj->port.count)
51276+ while (atomic_read(&ttyj->port.count))
51277 do_ipw_close(ttyj);
51278 ipwireless_disassociate_network_ttys(network,
51279 ttyj->channel_idx);
51280diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
51281index 1deaca4..c8582d4 100644
51282--- a/drivers/tty/moxa.c
51283+++ b/drivers/tty/moxa.c
51284@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
51285 }
51286
51287 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
51288- ch->port.count++;
51289+ atomic_inc(&ch->port.count);
51290 tty->driver_data = ch;
51291 tty_port_tty_set(&ch->port, tty);
51292 mutex_lock(&ch->port.mutex);
51293diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
51294index 2ebe47b..3205833 100644
51295--- a/drivers/tty/n_gsm.c
51296+++ b/drivers/tty/n_gsm.c
51297@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
51298 spin_lock_init(&dlci->lock);
51299 mutex_init(&dlci->mutex);
51300 dlci->fifo = &dlci->_fifo;
51301- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
51302+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
51303 kfree(dlci);
51304 return NULL;
51305 }
51306@@ -2954,7 +2954,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
51307 struct gsm_dlci *dlci = tty->driver_data;
51308 struct tty_port *port = &dlci->port;
51309
51310- port->count++;
51311+ atomic_inc(&port->count);
51312 tty_port_tty_set(port, tty);
51313
51314 dlci->modem_rx = 0;
51315diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
51316index e36d1f5..9938e3e 100644
51317--- a/drivers/tty/n_tty.c
51318+++ b/drivers/tty/n_tty.c
51319@@ -115,7 +115,7 @@ struct n_tty_data {
51320 int minimum_to_wake;
51321
51322 /* consumer-published */
51323- size_t read_tail;
51324+ size_t read_tail __intentional_overflow(-1);
51325 size_t line_start;
51326
51327 /* protected by output lock */
51328@@ -2519,6 +2519,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
51329 {
51330 *ops = tty_ldisc_N_TTY;
51331 ops->owner = NULL;
51332- ops->refcount = ops->flags = 0;
51333+ atomic_set(&ops->refcount, 0);
51334+ ops->flags = 0;
51335 }
51336 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
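
__intentional_overflow(-1) is an annotation for the PaX size_overflow gcc plugin: it whitelists read_tail, whose free-running value is reduced modulo the buffer size and is therefore allowed to wrap (assumption: this is the plugin's annotation convention for such fields). Why wrapping is harmless for a power-of-two ring-buffer index:

    #include <stdio.h>

    #define BUF_SIZE 4096   /* a power of two, like N_TTY_BUF_SIZE */

    static size_t mask(size_t idx) { return idx & (BUF_SIZE - 1); }

    int main(void)
    {
            size_t tail = (size_t)-3;       /* free-running index about to wrap */
            tail += 5;                      /* unsigned wrap past zero */
            printf("%zu\n", mask(tail));    /* still a valid slot: prints 2 */
            return 0;
    }
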
51337diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
51338index 25c9bc7..24077b7 100644
51339--- a/drivers/tty/pty.c
51340+++ b/drivers/tty/pty.c
51341@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
51342 panic("Couldn't register Unix98 pts driver");
51343
51344 /* Now create the /dev/ptmx special device */
51345+ pax_open_kernel();
51346 tty_default_fops(&ptmx_fops);
51347- ptmx_fops.open = ptmx_open;
51348+ *(void **)&ptmx_fops.open = ptmx_open;
51349+ pax_close_kernel();
51350
51351 cdev_init(&ptmx_cdev, &ptmx_fops);
51352 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
51353diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
51354index 383c4c7..d408e21 100644
51355--- a/drivers/tty/rocket.c
51356+++ b/drivers/tty/rocket.c
51357@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
51358 tty->driver_data = info;
51359 tty_port_tty_set(port, tty);
51360
51361- if (port->count++ == 0) {
51362+ if (atomic_inc_return(&port->count) == 1) {
51363 atomic_inc(&rp_num_ports_open);
51364
51365 #ifdef ROCKET_DEBUG_OPEN
51366@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
51367 #endif
51368 }
51369 #ifdef ROCKET_DEBUG_OPEN
51370- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
51371+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
51372 #endif
51373
51374 /*
51375@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
51376 spin_unlock_irqrestore(&info->port.lock, flags);
51377 return;
51378 }
51379- if (info->port.count)
51380+ if (atomic_read(&info->port.count))
51381 atomic_dec(&rp_num_ports_open);
51382 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
51383 spin_unlock_irqrestore(&info->port.lock, flags);
51384diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
51385index 1274499..f541382 100644
51386--- a/drivers/tty/serial/ioc4_serial.c
51387+++ b/drivers/tty/serial/ioc4_serial.c
51388@@ -437,7 +437,7 @@ struct ioc4_soft {
51389 } is_intr_info[MAX_IOC4_INTR_ENTS];
51390
51391 /* Number of entries active in the above array */
51392- atomic_t is_num_intrs;
51393+ atomic_unchecked_t is_num_intrs;
51394 } is_intr_type[IOC4_NUM_INTR_TYPES];
51395
51396 /* is_ir_lock must be held while
51397@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
51398 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
51399 || (type == IOC4_OTHER_INTR_TYPE)));
51400
51401- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
51402+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
51403 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
51404
51405 /* Save off the lower level interrupt handler */
51406@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
51407
51408 soft = arg;
51409 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
51410- num_intrs = (int)atomic_read(
51411+ num_intrs = (int)atomic_read_unchecked(
51412 &soft->is_intr_type[intr_type].is_num_intrs);
51413
51414 this_mir = this_ir = pending_intrs(soft, intr_type);
51415diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
51416index a260cde..6b2b5ce 100644
51417--- a/drivers/tty/serial/kgdboc.c
51418+++ b/drivers/tty/serial/kgdboc.c
51419@@ -24,8 +24,9 @@
51420 #define MAX_CONFIG_LEN 40
51421
51422 static struct kgdb_io kgdboc_io_ops;
51423+static struct kgdb_io kgdboc_io_ops_console;
51424
51425-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
51426+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
51427 static int configured = -1;
51428
51429 static char config[MAX_CONFIG_LEN];
51430@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
51431 kgdboc_unregister_kbd();
51432 if (configured == 1)
51433 kgdb_unregister_io_module(&kgdboc_io_ops);
51434+ else if (configured == 2)
51435+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
51436 }
51437
51438 static int configure_kgdboc(void)
51439@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
51440 int err;
51441 char *cptr = config;
51442 struct console *cons;
51443+ int is_console = 0;
51444
51445 err = kgdboc_option_setup(config);
51446 if (err || !strlen(config) || isspace(config[0]))
51447 goto noconfig;
51448
51449 err = -ENODEV;
51450- kgdboc_io_ops.is_console = 0;
51451 kgdb_tty_driver = NULL;
51452
51453 kgdboc_use_kms = 0;
51454@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
51455 int idx;
51456 if (cons->device && cons->device(cons, &idx) == p &&
51457 idx == tty_line) {
51458- kgdboc_io_ops.is_console = 1;
51459+ is_console = 1;
51460 break;
51461 }
51462 cons = cons->next;
51463@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
51464 kgdb_tty_line = tty_line;
51465
51466 do_register:
51467- err = kgdb_register_io_module(&kgdboc_io_ops);
51468+ if (is_console) {
51469+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
51470+ configured = 2;
51471+ } else {
51472+ err = kgdb_register_io_module(&kgdboc_io_ops);
51473+ configured = 1;
51474+ }
51475 if (err)
51476 goto noconfig;
51477
51478@@ -205,8 +214,6 @@ do_register:
51479 if (err)
51480 goto nmi_con_failed;
51481
51482- configured = 1;
51483-
51484 return 0;
51485
51486 nmi_con_failed:
51487@@ -223,7 +230,7 @@ noconfig:
51488 static int __init init_kgdboc(void)
51489 {
51490 /* Already configured? */
51491- if (configured == 1)
51492+ if (configured >= 1)
51493 return 0;
51494
51495 return configure_kgdboc();
51496@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
51497 if (config[len - 1] == '\n')
51498 config[len - 1] = '\0';
51499
51500- if (configured == 1)
51501+ if (configured >= 1)
51502 cleanup_kgdboc();
51503
51504 /* Go and configure with the new params. */
51505@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
51506 .post_exception = kgdboc_post_exp_handler,
51507 };
51508
51509+static struct kgdb_io kgdboc_io_ops_console = {
51510+ .name = "kgdboc",
51511+ .read_char = kgdboc_get_char,
51512+ .write_char = kgdboc_put_char,
51513+ .pre_exception = kgdboc_pre_exp_handler,
51514+ .post_exception = kgdboc_post_exp_handler,
51515+ .is_console = 1
51516+};
51517+
51518 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
51519 /* This is only available if kgdboc is a built in for early debugging */
51520 static int __init kgdboc_early_init(char *opt)
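The kgdboc rework exists because this patch makes ops structures such as struct kgdb_io read-only (constify): the old runtime write kgdboc_io_ops.is_console = 1 would no longer be possible. Instead, two pre-initialized structs are defined and the configured variable records which one was registered (1 vs 2) so cleanup_kgdboc() can unregister the right one. A minimal sketch of the selection pattern, with hypothetical model names:

/* Two pre-built, never-modified ops structures; registration picks one. */
struct io_ops_model {
	const char *name;
	int is_console;
};

static const struct io_ops_model ops_plain   = { "kgdboc", 0 };
static const struct io_ops_model ops_console = { "kgdboc", 1 };

static int configured_model;	/* 0 = none, 1 = plain, 2 = console */

static const struct io_ops_model *pick_ops(int is_console)
{
	configured_model = is_console ? 2 : 1;
	return is_console ? &ops_console : &ops_plain;
}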
51521diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
51522index b5d779c..3622cfe 100644
51523--- a/drivers/tty/serial/msm_serial.c
51524+++ b/drivers/tty/serial/msm_serial.c
51525@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
51526 .cons = MSM_CONSOLE,
51527 };
51528
51529-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
51530+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
51531
51532 static const struct of_device_id msm_uartdm_table[] = {
51533 { .compatible = "qcom,msm-uartdm" },
51534@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
51535 int irq;
51536
51537 if (pdev->id == -1)
51538- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
51539+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
51540
51541 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
51542 return -ENXIO;
51543diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
51544index 9cd706d..6ff2de7 100644
51545--- a/drivers/tty/serial/samsung.c
51546+++ b/drivers/tty/serial/samsung.c
51547@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
51548 }
51549 }
51550
51551+static int s3c64xx_serial_startup(struct uart_port *port);
51552 static int s3c24xx_serial_startup(struct uart_port *port)
51553 {
51554 struct s3c24xx_uart_port *ourport = to_ourport(port);
51555 int ret;
51556
51557+ /* Startup sequence is different for s3c64xx and higher SoC's */
51558+ if (s3c24xx_serial_has_interrupt_mask(port))
51559+ return s3c64xx_serial_startup(port);
51560+
51561 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
51562 port->mapbase, port->membase);
51563
51564@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
51565 /* setup info for port */
51566 port->dev = &platdev->dev;
51567
51568- /* Startup sequence is different for s3c64xx and higher SoC's */
51569- if (s3c24xx_serial_has_interrupt_mask(port))
51570- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
51571-
51572 port->uartclk = 1;
51573
51574 if (cfg->uart_flags & UPF_CONS_FLOW) {
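The samsung.c change is driven by the same constraint: s3c24xx_serial_init_port used to patch s3c24xx_serial_ops.startup at init time, which fails once the ops table is read-only. The decision moves into the startup handler itself via a forward declaration and an early return. A sketch of the shape, with hypothetical names:

/* Forward declaration plus early-return dispatch replaces the old
 * runtime write into the ops table. */
static int s3c64xx_startup_model(void);

static int s3c24xx_startup_model(int has_interrupt_mask)
{
	/* startup sequence differs for s3c64xx and higher SoCs */
	if (has_interrupt_mask)
		return s3c64xx_startup_model();
	/* ... legacy s3c24xx path ... */
	return 0;
}

static int s3c64xx_startup_model(void)
{
	/* ... s3c64xx path ... */
	return 0;
}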
51575diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
51576index ece2049..fba2524 100644
51577--- a/drivers/tty/serial/serial_core.c
51578+++ b/drivers/tty/serial/serial_core.c
51579@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
51580 uart_flush_buffer(tty);
51581 uart_shutdown(tty, state);
51582 spin_lock_irqsave(&port->lock, flags);
51583- port->count = 0;
51584+ atomic_set(&port->count, 0);
51585 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
51586 spin_unlock_irqrestore(&port->lock, flags);
51587 tty_port_tty_set(port, NULL);
51588@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
51589 goto end;
51590 }
51591
51592- port->count++;
51593+ atomic_inc(&port->count);
51594 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
51595 retval = -ENXIO;
51596 goto err_dec_count;
51597@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
51598 /*
51599 * Make sure the device is in D0 state.
51600 */
51601- if (port->count == 1)
51602+ if (atomic_read(&port->count) == 1)
51603 uart_change_pm(state, UART_PM_STATE_ON);
51604
51605 /*
51606@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
51607 end:
51608 return retval;
51609 err_dec_count:
51610- port->count--;
51611+ atomic_dec(&port->count);
51612 mutex_unlock(&port->mutex);
51613 goto end;
51614 }
51615diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
51616index 5ae14b4..2c1288f 100644
51617--- a/drivers/tty/synclink.c
51618+++ b/drivers/tty/synclink.c
51619@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
51620
51621 if (debug_level >= DEBUG_LEVEL_INFO)
51622 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
51623- __FILE__,__LINE__, info->device_name, info->port.count);
51624+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
51625
51626 if (tty_port_close_start(&info->port, tty, filp) == 0)
51627 goto cleanup;
51628@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
51629 cleanup:
51630 if (debug_level >= DEBUG_LEVEL_INFO)
51631 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
51632- tty->driver->name, info->port.count);
51633+ tty->driver->name, atomic_read(&info->port.count));
51634
51635 } /* end of mgsl_close() */
51636
51637@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
51638
51639 mgsl_flush_buffer(tty);
51640 shutdown(info);
51641-
51642- info->port.count = 0;
51643+
51644+ atomic_set(&info->port.count, 0);
51645 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51646 info->port.tty = NULL;
51647
51648@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
51649
51650 if (debug_level >= DEBUG_LEVEL_INFO)
51651 printk("%s(%d):block_til_ready before block on %s count=%d\n",
51652- __FILE__,__LINE__, tty->driver->name, port->count );
51653+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51654
51655 spin_lock_irqsave(&info->irq_spinlock, flags);
51656 if (!tty_hung_up_p(filp)) {
51657 extra_count = true;
51658- port->count--;
51659+ atomic_dec(&port->count);
51660 }
51661 spin_unlock_irqrestore(&info->irq_spinlock, flags);
51662 port->blocked_open++;
51663@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
51664
51665 if (debug_level >= DEBUG_LEVEL_INFO)
51666 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
51667- __FILE__,__LINE__, tty->driver->name, port->count );
51668+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51669
51670 tty_unlock(tty);
51671 schedule();
51672@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
51673
51674 /* FIXME: Racy on hangup during close wait */
51675 if (extra_count)
51676- port->count++;
51677+ atomic_inc(&port->count);
51678 port->blocked_open--;
51679
51680 if (debug_level >= DEBUG_LEVEL_INFO)
51681 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
51682- __FILE__,__LINE__, tty->driver->name, port->count );
51683+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51684
51685 if (!retval)
51686 port->flags |= ASYNC_NORMAL_ACTIVE;
51687@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
51688
51689 if (debug_level >= DEBUG_LEVEL_INFO)
51690 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
51691- __FILE__,__LINE__,tty->driver->name, info->port.count);
51692+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
51693
51694 /* If port is closing, signal caller to try again */
51695 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51696@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
51697 spin_unlock_irqrestore(&info->netlock, flags);
51698 goto cleanup;
51699 }
51700- info->port.count++;
51701+ atomic_inc(&info->port.count);
51702 spin_unlock_irqrestore(&info->netlock, flags);
51703
51704- if (info->port.count == 1) {
51705+ if (atomic_read(&info->port.count) == 1) {
51706 /* 1st open on this device, init hardware */
51707 retval = startup(info);
51708 if (retval < 0)
51709@@ -3446,8 +3446,8 @@ cleanup:
51710 if (retval) {
51711 if (tty->count == 1)
51712 info->port.tty = NULL; /* tty layer will release tty struct */
51713- if(info->port.count)
51714- info->port.count--;
51715+ if (atomic_read(&info->port.count))
51716+ atomic_dec(&info->port.count);
51717 }
51718
51719 return retval;
51720@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51721 unsigned short new_crctype;
51722
51723 /* return error if TTY interface open */
51724- if (info->port.count)
51725+ if (atomic_read(&info->port.count))
51726 return -EBUSY;
51727
51728 switch (encoding)
51729@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
51730
51731 /* arbitrate between network and tty opens */
51732 spin_lock_irqsave(&info->netlock, flags);
51733- if (info->port.count != 0 || info->netcount != 0) {
51734+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51735 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
51736 spin_unlock_irqrestore(&info->netlock, flags);
51737 return -EBUSY;
51738@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51739 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
51740
51741 /* return error if TTY interface open */
51742- if (info->port.count)
51743+ if (atomic_read(&info->port.count))
51744 return -EBUSY;
51745
51746 if (cmd != SIOCWANDEV)
51747diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
51748index c359a91..959fc26 100644
51749--- a/drivers/tty/synclink_gt.c
51750+++ b/drivers/tty/synclink_gt.c
51751@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51752 tty->driver_data = info;
51753 info->port.tty = tty;
51754
51755- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
51756+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
51757
51758 /* If port is closing, signal caller to try again */
51759 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51760@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51761 mutex_unlock(&info->port.mutex);
51762 goto cleanup;
51763 }
51764- info->port.count++;
51765+ atomic_inc(&info->port.count);
51766 spin_unlock_irqrestore(&info->netlock, flags);
51767
51768- if (info->port.count == 1) {
51769+ if (atomic_read(&info->port.count) == 1) {
51770 /* 1st open on this device, init hardware */
51771 retval = startup(info);
51772 if (retval < 0) {
51773@@ -715,8 +715,8 @@ cleanup:
51774 if (retval) {
51775 if (tty->count == 1)
51776 info->port.tty = NULL; /* tty layer will release tty struct */
51777- if(info->port.count)
51778- info->port.count--;
51779+ if(atomic_read(&info->port.count))
51780+ atomic_dec(&info->port.count);
51781 }
51782
51783 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
51784@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51785
51786 if (sanity_check(info, tty->name, "close"))
51787 return;
51788- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
51789+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
51790
51791 if (tty_port_close_start(&info->port, tty, filp) == 0)
51792 goto cleanup;
51793@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51794 tty_port_close_end(&info->port, tty);
51795 info->port.tty = NULL;
51796 cleanup:
51797- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
51798+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
51799 }
51800
51801 static void hangup(struct tty_struct *tty)
51802@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
51803 shutdown(info);
51804
51805 spin_lock_irqsave(&info->port.lock, flags);
51806- info->port.count = 0;
51807+ atomic_set(&info->port.count, 0);
51808 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51809 info->port.tty = NULL;
51810 spin_unlock_irqrestore(&info->port.lock, flags);
51811@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51812 unsigned short new_crctype;
51813
51814 /* return error if TTY interface open */
51815- if (info->port.count)
51816+ if (atomic_read(&info->port.count))
51817 return -EBUSY;
51818
51819 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
51820@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
51821
51822 /* arbitrate between network and tty opens */
51823 spin_lock_irqsave(&info->netlock, flags);
51824- if (info->port.count != 0 || info->netcount != 0) {
51825+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51826 DBGINFO(("%s hdlc_open busy\n", dev->name));
51827 spin_unlock_irqrestore(&info->netlock, flags);
51828 return -EBUSY;
51829@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51830 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
51831
51832 /* return error if TTY interface open */
51833- if (info->port.count)
51834+ if (atomic_read(&info->port.count))
51835 return -EBUSY;
51836
51837 if (cmd != SIOCWANDEV)
51838@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
51839 if (port == NULL)
51840 continue;
51841 spin_lock(&port->lock);
51842- if ((port->port.count || port->netcount) &&
51843+ if ((atomic_read(&port->port.count) || port->netcount) &&
51844 port->pending_bh && !port->bh_running &&
51845 !port->bh_requested) {
51846 DBGISR(("%s bh queued\n", port->device_name));
51847@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51848 spin_lock_irqsave(&info->lock, flags);
51849 if (!tty_hung_up_p(filp)) {
51850 extra_count = true;
51851- port->count--;
51852+ atomic_dec(&port->count);
51853 }
51854 spin_unlock_irqrestore(&info->lock, flags);
51855 port->blocked_open++;
51856@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51857 remove_wait_queue(&port->open_wait, &wait);
51858
51859 if (extra_count)
51860- port->count++;
51861+ atomic_inc(&port->count);
51862 port->blocked_open--;
51863
51864 if (!retval)
51865diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
51866index 144202e..4ccb07d 100644
51867--- a/drivers/tty/synclinkmp.c
51868+++ b/drivers/tty/synclinkmp.c
51869@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51870
51871 if (debug_level >= DEBUG_LEVEL_INFO)
51872 printk("%s(%d):%s open(), old ref count = %d\n",
51873- __FILE__,__LINE__,tty->driver->name, info->port.count);
51874+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
51875
51876 /* If port is closing, signal caller to try again */
51877 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51878@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51879 spin_unlock_irqrestore(&info->netlock, flags);
51880 goto cleanup;
51881 }
51882- info->port.count++;
51883+ atomic_inc(&info->port.count);
51884 spin_unlock_irqrestore(&info->netlock, flags);
51885
51886- if (info->port.count == 1) {
51887+ if (atomic_read(&info->port.count) == 1) {
51888 /* 1st open on this device, init hardware */
51889 retval = startup(info);
51890 if (retval < 0)
51891@@ -796,8 +796,8 @@ cleanup:
51892 if (retval) {
51893 if (tty->count == 1)
51894 info->port.tty = NULL; /* tty layer will release tty struct */
51895- if(info->port.count)
51896- info->port.count--;
51897+ if(atomic_read(&info->port.count))
51898+ atomic_dec(&info->port.count);
51899 }
51900
51901 return retval;
51902@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51903
51904 if (debug_level >= DEBUG_LEVEL_INFO)
51905 printk("%s(%d):%s close() entry, count=%d\n",
51906- __FILE__,__LINE__, info->device_name, info->port.count);
51907+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
51908
51909 if (tty_port_close_start(&info->port, tty, filp) == 0)
51910 goto cleanup;
51911@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51912 cleanup:
51913 if (debug_level >= DEBUG_LEVEL_INFO)
51914 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
51915- tty->driver->name, info->port.count);
51916+ tty->driver->name, atomic_read(&info->port.count));
51917 }
51918
51919 /* Called by tty_hangup() when a hangup is signaled.
51920@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
51921 shutdown(info);
51922
51923 spin_lock_irqsave(&info->port.lock, flags);
51924- info->port.count = 0;
51925+ atomic_set(&info->port.count, 0);
51926 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51927 info->port.tty = NULL;
51928 spin_unlock_irqrestore(&info->port.lock, flags);
51929@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51930 unsigned short new_crctype;
51931
51932 /* return error if TTY interface open */
51933- if (info->port.count)
51934+ if (atomic_read(&info->port.count))
51935 return -EBUSY;
51936
51937 switch (encoding)
51938@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
51939
51940 /* arbitrate between network and tty opens */
51941 spin_lock_irqsave(&info->netlock, flags);
51942- if (info->port.count != 0 || info->netcount != 0) {
51943+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51944 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
51945 spin_unlock_irqrestore(&info->netlock, flags);
51946 return -EBUSY;
51947@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51948 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
51949
51950 /* return error if TTY interface open */
51951- if (info->port.count)
51952+ if (atomic_read(&info->port.count))
51953 return -EBUSY;
51954
51955 if (cmd != SIOCWANDEV)
51956@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
51957 * do not request bottom half processing if the
51958 * device is not open in a normal mode.
51959 */
51960- if ( port && (port->port.count || port->netcount) &&
51961+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
51962 port->pending_bh && !port->bh_running &&
51963 !port->bh_requested ) {
51964 if ( debug_level >= DEBUG_LEVEL_ISR )
51965@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51966
51967 if (debug_level >= DEBUG_LEVEL_INFO)
51968 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
51969- __FILE__,__LINE__, tty->driver->name, port->count );
51970+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51971
51972 spin_lock_irqsave(&info->lock, flags);
51973 if (!tty_hung_up_p(filp)) {
51974 extra_count = true;
51975- port->count--;
51976+ atomic_dec(&port->count);
51977 }
51978 spin_unlock_irqrestore(&info->lock, flags);
51979 port->blocked_open++;
51980@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51981
51982 if (debug_level >= DEBUG_LEVEL_INFO)
51983 printk("%s(%d):%s block_til_ready() count=%d\n",
51984- __FILE__,__LINE__, tty->driver->name, port->count );
51985+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51986
51987 tty_unlock(tty);
51988 schedule();
51989@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51990 remove_wait_queue(&port->open_wait, &wait);
51991
51992 if (extra_count)
51993- port->count++;
51994+ atomic_inc(&port->count);
51995 port->blocked_open--;
51996
51997 if (debug_level >= DEBUG_LEVEL_INFO)
51998 printk("%s(%d):%s block_til_ready() after, count=%d\n",
51999- __FILE__,__LINE__, tty->driver->name, port->count );
52000+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
52001
52002 if (!retval)
52003 port->flags |= ASYNC_NORMAL_ACTIVE;
52004diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
52005index ce396ec..04a37be 100644
52006--- a/drivers/tty/sysrq.c
52007+++ b/drivers/tty/sysrq.c
52008@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
52009 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
52010 size_t count, loff_t *ppos)
52011 {
52012- if (count) {
52013+ if (count && capable(CAP_SYS_ADMIN)) {
52014 char c;
52015
52016 if (get_user(c, buf))
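The sysrq hunk gates /proc/sysrq-trigger writes on CAP_SYS_ADMIN. Note the failure mode: an unprivileged write is silently accepted (the byte count is still returned) rather than failing with -EPERM. A standalone model of the handler's control flow, using geteuid() == 0 as a stand-in for the kernel's capable() check; names are hypothetical:

#include <sys/types.h>
#include <unistd.h>
#include <stddef.h>

static ssize_t sysrq_write_model(const char *buf, size_t count)
{
	if (count && geteuid() == 0) {	/* kernel: count && capable(CAP_SYS_ADMIN) */
		char c = buf[0];
		/* ... dispatch the sysrq command for 'c' ... */
		(void)c;
	}
	return (ssize_t)count;	/* unprivileged writes succeed but do nothing */
}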
52017diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
52018index d3448a9..28e8db0 100644
52019--- a/drivers/tty/tty_io.c
52020+++ b/drivers/tty/tty_io.c
52021@@ -3475,7 +3475,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
52022
52023 void tty_default_fops(struct file_operations *fops)
52024 {
52025- *fops = tty_fops;
52026+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
52027 }
52028
52029 /*
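tty_default_fops switches from struct assignment to memcpy() through a void * cast because, with the constify plugin, struct file_operations is effectively const-qualified and a plain assignment to *fops no longer compiles; the caller's buffer is genuinely writable, so the byte copy is safe. A compilable model of the idiom (names hypothetical):

#include <string.h>

struct file_ops_model {
	long (*read)(void);
	long (*write)(void);
};

static const struct file_ops_model tty_fops_model = { 0 };

/* Plain '*fops = tty_fops_model' is rejected once the struct type is
 * const-qualified by the plugin; a byte copy through void * is not. */
void tty_default_fops_model(struct file_ops_model *fops)
{
	memcpy((void *)fops, &tty_fops_model, sizeof(tty_fops_model));
}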
52030diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
52031index 2d822aa..a566234 100644
52032--- a/drivers/tty/tty_ldisc.c
52033+++ b/drivers/tty/tty_ldisc.c
52034@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
52035 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
52036 tty_ldiscs[disc] = new_ldisc;
52037 new_ldisc->num = disc;
52038- new_ldisc->refcount = 0;
52039+ atomic_set(&new_ldisc->refcount, 0);
52040 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
52041
52042 return ret;
52043@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
52044 return -EINVAL;
52045
52046 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
52047- if (tty_ldiscs[disc]->refcount)
52048+ if (atomic_read(&tty_ldiscs[disc]->refcount))
52049 ret = -EBUSY;
52050 else
52051 tty_ldiscs[disc] = NULL;
52052@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
52053 if (ldops) {
52054 ret = ERR_PTR(-EAGAIN);
52055 if (try_module_get(ldops->owner)) {
52056- ldops->refcount++;
52057+ atomic_inc(&ldops->refcount);
52058 ret = ldops;
52059 }
52060 }
52061@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
52062 unsigned long flags;
52063
52064 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
52065- ldops->refcount--;
52066+ atomic_dec(&ldops->refcount);
52067 module_put(ldops->owner);
52068 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
52069 }
52070diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
52071index 3f746c8..2f2fcaa 100644
52072--- a/drivers/tty/tty_port.c
52073+++ b/drivers/tty/tty_port.c
52074@@ -235,7 +235,7 @@ void tty_port_hangup(struct tty_port *port)
52075 unsigned long flags;
52076
52077 spin_lock_irqsave(&port->lock, flags);
52078- port->count = 0;
52079+ atomic_set(&port->count, 0);
52080 port->flags &= ~ASYNC_NORMAL_ACTIVE;
52081 tty = port->tty;
52082 if (tty)
52083@@ -393,7 +393,7 @@ int tty_port_block_til_ready(struct tty_port *port,
52084 /* The port lock protects the port counts */
52085 spin_lock_irqsave(&port->lock, flags);
52086 if (!tty_hung_up_p(filp))
52087- port->count--;
52088+ atomic_dec(&port->count);
52089 port->blocked_open++;
52090 spin_unlock_irqrestore(&port->lock, flags);
52091
52092@@ -435,7 +435,7 @@ int tty_port_block_til_ready(struct tty_port *port,
52093 we must not mess that up further */
52094 spin_lock_irqsave(&port->lock, flags);
52095 if (!tty_hung_up_p(filp))
52096- port->count++;
52097+ atomic_inc(&port->count);
52098 port->blocked_open--;
52099 if (retval == 0)
52100 port->flags |= ASYNC_NORMAL_ACTIVE;
52101@@ -469,19 +469,19 @@ int tty_port_close_start(struct tty_port *port,
52102 return 0;
52103 }
52104
52105- if (tty->count == 1 && port->count != 1) {
52106+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
52107 printk(KERN_WARNING
52108 "tty_port_close_start: tty->count = 1 port count = %d.\n",
52109- port->count);
52110- port->count = 1;
52111+ atomic_read(&port->count));
52112+ atomic_set(&port->count, 1);
52113 }
52114- if (--port->count < 0) {
52115+ if (atomic_dec_return(&port->count) < 0) {
52116 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
52117- port->count);
52118- port->count = 0;
52119+ atomic_read(&port->count));
52120+ atomic_set(&port->count, 0);
52121 }
52122
52123- if (port->count) {
52124+ if (atomic_read(&port->count)) {
52125 spin_unlock_irqrestore(&port->lock, flags);
52126 return 0;
52127 }
52128@@ -563,7 +563,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
52129 {
52130 spin_lock_irq(&port->lock);
52131 if (!tty_hung_up_p(filp))
52132- ++port->count;
52133+ atomic_inc(&port->count);
52134 spin_unlock_irq(&port->lock);
52135 tty_port_tty_set(port, tty);
52136
52137diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
52138index d0e3a44..5f8b754 100644
52139--- a/drivers/tty/vt/keyboard.c
52140+++ b/drivers/tty/vt/keyboard.c
52141@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
52142 kbd->kbdmode == VC_OFF) &&
52143 value != KVAL(K_SAK))
52144 return; /* SAK is allowed even in raw mode */
52145+
52146+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52147+ {
52148+ void *func = fn_handler[value];
52149+ if (func == fn_show_state || func == fn_show_ptregs ||
52150+ func == fn_show_mem)
52151+ return;
52152+ }
52153+#endif
52154+
52155 fn_handler[value](vc);
52156 }
52157
52158@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
52159 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
52160 return -EFAULT;
52161
52162- if (!capable(CAP_SYS_TTY_CONFIG))
52163- perm = 0;
52164-
52165 switch (cmd) {
52166 case KDGKBENT:
52167 /* Ensure another thread doesn't free it under us */
52168@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
52169 spin_unlock_irqrestore(&kbd_event_lock, flags);
52170 return put_user(val, &user_kbe->kb_value);
52171 case KDSKBENT:
52172+ if (!capable(CAP_SYS_TTY_CONFIG))
52173+ perm = 0;
52174+
52175 if (!perm)
52176 return -EPERM;
52177 if (!i && v == K_NOSUCHMAP) {
52178@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
52179 int i, j, k;
52180 int ret;
52181
52182- if (!capable(CAP_SYS_TTY_CONFIG))
52183- perm = 0;
52184-
52185 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
52186 if (!kbs) {
52187 ret = -ENOMEM;
52188@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
52189 kfree(kbs);
52190 return ((p && *p) ? -EOVERFLOW : 0);
52191 case KDSKBSENT:
52192+ if (!capable(CAP_SYS_TTY_CONFIG))
52193+ perm = 0;
52194+
52195 if (!perm) {
52196 ret = -EPERM;
52197 goto reterr;
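The keyboard.c hunks do two things. First, the fn-handler filter drops the Show-State/Show-Registers/Show-Memory console dumps when GRKERNSEC_PROC* is enabled, since they would leak exactly the task and memory details that /proc restriction hides. Second, the CAP_SYS_TTY_CONFIG check moves from the top of each ioctl into the KDSKBENT/KDSKBSENT write cases, so reading keymap entries no longer requires the capability. A standalone model of the handler filter, with hypothetical names:

/* Model of the filter: these three console dumpers leak task and
 * memory state, so they are skipped when /proc restriction is on. */
typedef void (*fn_handler_model_t)(void);

static void fn_show_state_model(void)  { /* dumps all task states */ }
static void fn_show_ptregs_model(void) { /* dumps registers */ }
static void fn_show_mem_model(void)    { /* dumps memory stats */ }

static int proc_restricted_model = 1;	/* CONFIG_GRKERNSEC_PROC* stand-in */

static void k_spec_model(fn_handler_model_t func)
{
	if (proc_restricted_model &&
	    (func == fn_show_state_model || func == fn_show_ptregs_model ||
	     func == fn_show_mem_model))
		return;
	func();
}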
52198diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
52199index a673e5b..36e5d32 100644
52200--- a/drivers/uio/uio.c
52201+++ b/drivers/uio/uio.c
52202@@ -25,6 +25,7 @@
52203 #include <linux/kobject.h>
52204 #include <linux/cdev.h>
52205 #include <linux/uio_driver.h>
52206+#include <asm/local.h>
52207
52208 #define UIO_MAX_DEVICES (1U << MINORBITS)
52209
52210@@ -32,7 +33,7 @@ struct uio_device {
52211 struct module *owner;
52212 struct device *dev;
52213 int minor;
52214- atomic_t event;
52215+ atomic_unchecked_t event;
52216 struct fasync_struct *async_queue;
52217 wait_queue_head_t wait;
52218 struct uio_info *info;
52219@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
52220 struct device_attribute *attr, char *buf)
52221 {
52222 struct uio_device *idev = dev_get_drvdata(dev);
52223- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
52224+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
52225 }
52226 static DEVICE_ATTR_RO(event);
52227
52228@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
52229 {
52230 struct uio_device *idev = info->uio_dev;
52231
52232- atomic_inc(&idev->event);
52233+ atomic_inc_unchecked(&idev->event);
52234 wake_up_interruptible(&idev->wait);
52235 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
52236 }
52237@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
52238 }
52239
52240 listener->dev = idev;
52241- listener->event_count = atomic_read(&idev->event);
52242+ listener->event_count = atomic_read_unchecked(&idev->event);
52243 filep->private_data = listener;
52244
52245 if (idev->info->open) {
52246@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
52247 return -EIO;
52248
52249 poll_wait(filep, &idev->wait, wait);
52250- if (listener->event_count != atomic_read(&idev->event))
52251+ if (listener->event_count != atomic_read_unchecked(&idev->event))
52252 return POLLIN | POLLRDNORM;
52253 return 0;
52254 }
52255@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
52256 do {
52257 set_current_state(TASK_INTERRUPTIBLE);
52258
52259- event_count = atomic_read(&idev->event);
52260+ event_count = atomic_read_unchecked(&idev->event);
52261 if (event_count != listener->event_count) {
52262 if (copy_to_user(buf, &event_count, count))
52263 retval = -EFAULT;
52264@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
52265 static int uio_find_mem_index(struct vm_area_struct *vma)
52266 {
52267 struct uio_device *idev = vma->vm_private_data;
52268+ unsigned long size;
52269
52270 if (vma->vm_pgoff < MAX_UIO_MAPS) {
52271- if (idev->info->mem[vma->vm_pgoff].size == 0)
52272+ size = idev->info->mem[vma->vm_pgoff].size;
52273+ if (size == 0)
52274+ return -1;
52275+ if (vma->vm_end - vma->vm_start > size)
52276 return -1;
52277 return (int)vma->vm_pgoff;
52278 }
52279@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
52280 idev->owner = owner;
52281 idev->info = info;
52282 init_waitqueue_head(&idev->wait);
52283- atomic_set(&idev->event, 0);
52284+ atomic_set_unchecked(&idev->event, 0);
52285
52286 ret = uio_get_minor(idev);
52287 if (ret)
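Besides the atomic_unchecked_t conversion, uio_find_mem_index gains a bounds check: the old code only verified that the region at vm_pgoff was non-empty, so an mmap() longer than the region would map past its end. A standalone model of the added span check (names hypothetical):

#include <stddef.h>

struct mem_region_model { unsigned long size; };

static int find_mem_index_model(const struct mem_region_model *mem,
				unsigned long nregions,
				unsigned long pgoff,
				unsigned long vm_start,
				unsigned long vm_end)
{
	if (pgoff < nregions) {
		unsigned long size = mem[pgoff].size;
		if (size == 0)
			return -1;
		if (vm_end - vm_start > size)	/* new: mapping must fit */
			return -1;
		return (int)pgoff;
	}
	return -1;
}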
52288diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
52289index 813d4d3..a71934f 100644
52290--- a/drivers/usb/atm/cxacru.c
52291+++ b/drivers/usb/atm/cxacru.c
52292@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
52293 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
52294 if (ret < 2)
52295 return -EINVAL;
52296- if (index < 0 || index > 0x7f)
52297+ if (index > 0x7f)
52298 return -EINVAL;
52299 pos += tmp;
52300
52301diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
52302index dada014..1d0d517 100644
52303--- a/drivers/usb/atm/usbatm.c
52304+++ b/drivers/usb/atm/usbatm.c
52305@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
52306 if (printk_ratelimit())
52307 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
52308 __func__, vpi, vci);
52309- atomic_inc(&vcc->stats->rx_err);
52310+ atomic_inc_unchecked(&vcc->stats->rx_err);
52311 return;
52312 }
52313
52314@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
52315 if (length > ATM_MAX_AAL5_PDU) {
52316 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
52317 __func__, length, vcc);
52318- atomic_inc(&vcc->stats->rx_err);
52319+ atomic_inc_unchecked(&vcc->stats->rx_err);
52320 goto out;
52321 }
52322
52323@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
52324 if (sarb->len < pdu_length) {
52325 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
52326 __func__, pdu_length, sarb->len, vcc);
52327- atomic_inc(&vcc->stats->rx_err);
52328+ atomic_inc_unchecked(&vcc->stats->rx_err);
52329 goto out;
52330 }
52331
52332 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
52333 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
52334 __func__, vcc);
52335- atomic_inc(&vcc->stats->rx_err);
52336+ atomic_inc_unchecked(&vcc->stats->rx_err);
52337 goto out;
52338 }
52339
52340@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
52341 if (printk_ratelimit())
52342 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
52343 __func__, length);
52344- atomic_inc(&vcc->stats->rx_drop);
52345+ atomic_inc_unchecked(&vcc->stats->rx_drop);
52346 goto out;
52347 }
52348
52349@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
52350
52351 vcc->push(vcc, skb);
52352
52353- atomic_inc(&vcc->stats->rx);
52354+ atomic_inc_unchecked(&vcc->stats->rx);
52355 out:
52356 skb_trim(sarb, 0);
52357 }
52358@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
52359 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
52360
52361 usbatm_pop(vcc, skb);
52362- atomic_inc(&vcc->stats->tx);
52363+ atomic_inc_unchecked(&vcc->stats->tx);
52364
52365 skb = skb_dequeue(&instance->sndqueue);
52366 }
52367@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
52368 if (!left--)
52369 return sprintf(page,
52370 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
52371- atomic_read(&atm_dev->stats.aal5.tx),
52372- atomic_read(&atm_dev->stats.aal5.tx_err),
52373- atomic_read(&atm_dev->stats.aal5.rx),
52374- atomic_read(&atm_dev->stats.aal5.rx_err),
52375- atomic_read(&atm_dev->stats.aal5.rx_drop));
52376+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
52377+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
52378+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
52379+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
52380+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
52381
52382 if (!left--) {
52383 if (instance->disconnected)
52384diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
52385index 2a3bbdf..91d72cf 100644
52386--- a/drivers/usb/core/devices.c
52387+++ b/drivers/usb/core/devices.c
52388@@ -126,7 +126,7 @@ static const char format_endpt[] =
52389 * time it gets called.
52390 */
52391 static struct device_connect_event {
52392- atomic_t count;
52393+ atomic_unchecked_t count;
52394 wait_queue_head_t wait;
52395 } device_event = {
52396 .count = ATOMIC_INIT(1),
52397@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
52398
52399 void usbfs_conn_disc_event(void)
52400 {
52401- atomic_add(2, &device_event.count);
52402+ atomic_add_unchecked(2, &device_event.count);
52403 wake_up(&device_event.wait);
52404 }
52405
52406@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
52407
52408 poll_wait(file, &device_event.wait, wait);
52409
52410- event_count = atomic_read(&device_event.count);
52411+ event_count = atomic_read_unchecked(&device_event.count);
52412 if (file->f_version != event_count) {
52413 file->f_version = event_count;
52414 return POLLIN | POLLRDNORM;
52415diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
52416index 90e18f6..5eeda46 100644
52417--- a/drivers/usb/core/devio.c
52418+++ b/drivers/usb/core/devio.c
52419@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
52420 struct dev_state *ps = file->private_data;
52421 struct usb_device *dev = ps->dev;
52422 ssize_t ret = 0;
52423- unsigned len;
52424+ size_t len;
52425 loff_t pos;
52426 int i;
52427
52428@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
52429 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
52430 struct usb_config_descriptor *config =
52431 (struct usb_config_descriptor *)dev->rawdescriptors[i];
52432- unsigned int length = le16_to_cpu(config->wTotalLength);
52433+ size_t length = le16_to_cpu(config->wTotalLength);
52434
52435 if (*ppos < pos + length) {
52436
52437 /* The descriptor may claim to be longer than it
52438 * really is. Here is the actual allocated length. */
52439- unsigned alloclen =
52440+ size_t alloclen =
52441 le16_to_cpu(dev->config[i].desc.wTotalLength);
52442
52443- len = length - (*ppos - pos);
52444+ len = length + pos - *ppos;
52445 if (len > nbytes)
52446 len = nbytes;
52447
52448 /* Simply don't write (skip over) unallocated parts */
52449 if (alloclen > (*ppos - pos)) {
52450- alloclen -= (*ppos - pos);
52451+ alloclen = alloclen + pos - *ppos;
52452 if (copy_to_user(buf,
52453 dev->rawdescriptors[i] + (*ppos - pos),
52454 min(len, alloclen))) {
52455diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
52456index 2518c32..1c201bb 100644
52457--- a/drivers/usb/core/hcd.c
52458+++ b/drivers/usb/core/hcd.c
52459@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
52460 */
52461 usb_get_urb(urb);
52462 atomic_inc(&urb->use_count);
52463- atomic_inc(&urb->dev->urbnum);
52464+ atomic_inc_unchecked(&urb->dev->urbnum);
52465 usbmon_urb_submit(&hcd->self, urb);
52466
52467 /* NOTE requirements on root-hub callers (usbfs and the hub
52468@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
52469 urb->hcpriv = NULL;
52470 INIT_LIST_HEAD(&urb->urb_list);
52471 atomic_dec(&urb->use_count);
52472- atomic_dec(&urb->dev->urbnum);
52473+ atomic_dec_unchecked(&urb->dev->urbnum);
52474 if (atomic_read(&urb->reject))
52475 wake_up(&usb_kill_urb_queue);
52476 usb_put_urb(urb);
52477diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
52478index d498d03..e26f959 100644
52479--- a/drivers/usb/core/hub.c
52480+++ b/drivers/usb/core/hub.c
52481@@ -27,6 +27,7 @@
52482 #include <linux/freezer.h>
52483 #include <linux/random.h>
52484 #include <linux/pm_qos.h>
52485+#include <linux/grsecurity.h>
52486
52487 #include <asm/uaccess.h>
52488 #include <asm/byteorder.h>
52489@@ -4472,6 +4473,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
52490 goto done;
52491 return;
52492 }
52493+
52494+ if (gr_handle_new_usb())
52495+ goto done;
52496+
52497 if (hub_is_superspeed(hub->hdev))
52498 unit_load = 150;
52499 else
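The hub.c hook is grsecurity's deny-new-USB feature: once an administrator sets the corresponding toggle (a sysctl in the full patch; its definition lives elsewhere and the name is not shown here), gr_handle_new_usb() returns nonzero and hub_port_connect_change() bails out before the device is enumerated. A rough sketch of the latch, with hypothetical names:

/* One-way latch: once set, new-device handling bails early. */
static int deny_new_usb_model;	/* toggled via sysctl in the real feature */

static int gr_handle_new_usb_model(void)
{
	if (deny_new_usb_model) {
		/* the real code logs an alert before refusing */
		return 1;	/* caller: goto done; device is ignored */
	}
	return 0;
}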
52500diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
52501index f829a1a..e6c334a 100644
52502--- a/drivers/usb/core/message.c
52503+++ b/drivers/usb/core/message.c
52504@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
52505 * Return: If successful, the number of bytes transferred. Otherwise, a negative
52506 * error number.
52507 */
52508-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
52509+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
52510 __u8 requesttype, __u16 value, __u16 index, void *data,
52511 __u16 size, int timeout)
52512 {
52513@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
52514 * If successful, 0. Otherwise a negative error number. The number of actual
52515 * bytes transferred will be stored in the @actual_length paramater.
52516 */
52517-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
52518+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
52519 void *data, int len, int *actual_length, int timeout)
52520 {
52521 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
52522@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
52523 * bytes transferred will be stored in the @actual_length parameter.
52524 *
52525 */
52526-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
52527+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
52528 void *data, int len, int *actual_length, int timeout)
52529 {
52530 struct urb *urb;
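The __intentional_overflow annotations whitelist functions for the size-overflow GCC plugin, which otherwise instruments integer expressions flowing into size parameters. The -1 argument is understood here to exempt the function's return value, an assumption about the plugin's convention, as is the guard macro name in this sketch of the plumbing:

#ifdef SIZE_OVERFLOW_PLUGIN	/* guard name is an assumption */
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* expands to nothing */
#endif

/* usage shape, mirroring the hunk above (hypothetical declaration) */
int __intentional_overflow(-1) usb_control_msg_model(int size, int timeout);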
52531diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
52532index 1236c60..d47a51c 100644
52533--- a/drivers/usb/core/sysfs.c
52534+++ b/drivers/usb/core/sysfs.c
52535@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
52536 struct usb_device *udev;
52537
52538 udev = to_usb_device(dev);
52539- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
52540+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
52541 }
52542 static DEVICE_ATTR_RO(urbnum);
52543
52544diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
52545index 4d11449..f4ccabf 100644
52546--- a/drivers/usb/core/usb.c
52547+++ b/drivers/usb/core/usb.c
52548@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
52549 set_dev_node(&dev->dev, dev_to_node(bus->controller));
52550 dev->state = USB_STATE_ATTACHED;
52551 dev->lpm_disable_count = 1;
52552- atomic_set(&dev->urbnum, 0);
52553+ atomic_set_unchecked(&dev->urbnum, 0);
52554
52555 INIT_LIST_HEAD(&dev->ep0.urb_list);
52556 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
52557diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
52558index 2da0a5a..4870e09 100644
52559--- a/drivers/usb/dwc3/gadget.c
52560+++ b/drivers/usb/dwc3/gadget.c
52561@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
52562 if (!usb_endpoint_xfer_isoc(desc))
52563 return 0;
52564
52565- memset(&trb_link, 0, sizeof(trb_link));
52566-
52567 /* Link TRB for ISOC. The HWO bit is never reset */
52568 trb_st_hw = &dep->trb_pool[0];
52569
52570diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
52571index 8cfc319..4868255 100644
52572--- a/drivers/usb/early/ehci-dbgp.c
52573+++ b/drivers/usb/early/ehci-dbgp.c
52574@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
52575
52576 #ifdef CONFIG_KGDB
52577 static struct kgdb_io kgdbdbgp_io_ops;
52578-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
52579+static struct kgdb_io kgdbdbgp_io_ops_console;
52580+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
52581 #else
52582 #define dbgp_kgdb_mode (0)
52583 #endif
52584@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
52585 .write_char = kgdbdbgp_write_char,
52586 };
52587
52588+static struct kgdb_io kgdbdbgp_io_ops_console = {
52589+ .name = "kgdbdbgp",
52590+ .read_char = kgdbdbgp_read_char,
52591+ .write_char = kgdbdbgp_write_char,
52592+ .is_console = 1
52593+};
52594+
52595 static int kgdbdbgp_wait_time;
52596
52597 static int __init kgdbdbgp_parse_config(char *str)
52598@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
52599 ptr++;
52600 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
52601 }
52602- kgdb_register_io_module(&kgdbdbgp_io_ops);
52603- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
52604+ if (early_dbgp_console.index != -1)
52605+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
52606+ else
52607+ kgdb_register_io_module(&kgdbdbgp_io_ops);
52608
52609 return 0;
52610 }
52611diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
52612index 2b4c82d..06a8ee6 100644
52613--- a/drivers/usb/gadget/f_uac1.c
52614+++ b/drivers/usb/gadget/f_uac1.c
52615@@ -13,6 +13,7 @@
52616 #include <linux/kernel.h>
52617 #include <linux/device.h>
52618 #include <linux/atomic.h>
52619+#include <linux/module.h>
52620
52621 #include "u_uac1.h"
52622
52623diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
52624index ad0aca8..8ff84865 100644
52625--- a/drivers/usb/gadget/u_serial.c
52626+++ b/drivers/usb/gadget/u_serial.c
52627@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
52628 spin_lock_irq(&port->port_lock);
52629
52630 /* already open? Great. */
52631- if (port->port.count) {
52632+ if (atomic_read(&port->port.count)) {
52633 status = 0;
52634- port->port.count++;
52635+ atomic_inc(&port->port.count);
52636
52637 /* currently opening/closing? wait ... */
52638 } else if (port->openclose) {
52639@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
52640 tty->driver_data = port;
52641 port->port.tty = tty;
52642
52643- port->port.count = 1;
52644+ atomic_set(&port->port.count, 1);
52645 port->openclose = false;
52646
52647 /* if connected, start the I/O stream */
52648@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
52649
52650 spin_lock_irq(&port->port_lock);
52651
52652- if (port->port.count != 1) {
52653- if (port->port.count == 0)
52654+ if (atomic_read(&port->port.count) != 1) {
52655+ if (atomic_read(&port->port.count) == 0)
52656 WARN_ON(1);
52657 else
52658- --port->port.count;
52659+ atomic_dec(&port->port.count);
52660 goto exit;
52661 }
52662
52663@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
52664 * and sleep if necessary
52665 */
52666 port->openclose = true;
52667- port->port.count = 0;
52668+ atomic_set(&port->port.count, 0);
52669
52670 gser = port->port_usb;
52671 if (gser && gser->disconnect)
52672@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
52673 int cond;
52674
52675 spin_lock_irq(&port->port_lock);
52676- cond = (port->port.count == 0) && !port->openclose;
52677+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
52678 spin_unlock_irq(&port->port_lock);
52679 return cond;
52680 }
52681@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
52682 /* if it's already open, start I/O ... and notify the serial
52683 * protocol about open/close status (connect/disconnect).
52684 */
52685- if (port->port.count) {
52686+ if (atomic_read(&port->port.count)) {
52687 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
52688 gs_start_io(port);
52689 if (gser->connect)
52690@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
52691
52692 port->port_usb = NULL;
52693 gser->ioport = NULL;
52694- if (port->port.count > 0 || port->openclose) {
52695+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
52696 wake_up_interruptible(&port->drain_wait);
52697 if (port->port.tty)
52698 tty_hangup(port->port.tty);
52699@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
52700
52701 /* finally, free any unused/unusable I/O buffers */
52702 spin_lock_irqsave(&port->port_lock, flags);
52703- if (port->port.count == 0 && !port->openclose)
52704+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
52705 gs_buf_free(&port->port_write_buf);
52706 gs_free_requests(gser->out, &port->read_pool, NULL);
52707 gs_free_requests(gser->out, &port->read_queue, NULL);
52708diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
52709index 7a55fea..cc0ed4f 100644
52710--- a/drivers/usb/gadget/u_uac1.c
52711+++ b/drivers/usb/gadget/u_uac1.c
52712@@ -16,6 +16,7 @@
52713 #include <linux/ctype.h>
52714 #include <linux/random.h>
52715 #include <linux/syscalls.h>
52716+#include <linux/module.h>
52717
52718 #include "u_uac1.h"
52719
52720diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
52721index 7ae0c4d..35521b7 100644
52722--- a/drivers/usb/host/ehci-hub.c
52723+++ b/drivers/usb/host/ehci-hub.c
52724@@ -780,7 +780,7 @@ static struct urb *request_single_step_set_feature_urb(
52725 urb->transfer_flags = URB_DIR_IN;
52726 usb_get_urb(urb);
52727 atomic_inc(&urb->use_count);
52728- atomic_inc(&urb->dev->urbnum);
52729+ atomic_inc_unchecked(&urb->dev->urbnum);
52730 urb->setup_dma = dma_map_single(
52731 hcd->self.controller,
52732 urb->setup_packet,
52733@@ -847,7 +847,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
52734 urb->status = -EINPROGRESS;
52735 usb_get_urb(urb);
52736 atomic_inc(&urb->use_count);
52737- atomic_inc(&urb->dev->urbnum);
52738+ atomic_inc_unchecked(&urb->dev->urbnum);
52739 retval = submit_single_step_set_feature(hcd, urb, 0);
52740 if (!retval && !wait_for_completion_timeout(&done,
52741 msecs_to_jiffies(2000))) {
52742diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
52743index ba6a5d6..f88f7f3 100644
52744--- a/drivers/usb/misc/appledisplay.c
52745+++ b/drivers/usb/misc/appledisplay.c
52746@@ -83,7 +83,7 @@ struct appledisplay {
52747 spinlock_t lock;
52748 };
52749
52750-static atomic_t count_displays = ATOMIC_INIT(0);
52751+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
52752 static struct workqueue_struct *wq;
52753
52754 static void appledisplay_complete(struct urb *urb)
52755@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
52756
52757 /* Register backlight device */
52758 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
52759- atomic_inc_return(&count_displays) - 1);
52760+ atomic_inc_return_unchecked(&count_displays) - 1);
52761 memset(&props, 0, sizeof(struct backlight_properties));
52762 props.type = BACKLIGHT_RAW;
52763 props.max_brightness = 0xff;
52764diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
52765index 8d7fc48..01c4986 100644
52766--- a/drivers/usb/serial/console.c
52767+++ b/drivers/usb/serial/console.c
52768@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
52769
52770 info->port = port;
52771
52772- ++port->port.count;
52773+ atomic_inc(&port->port.count);
52774 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
52775 if (serial->type->set_termios) {
52776 /*
52777@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
52778 }
52779 /* Now that any required fake tty operations are completed restore
52780 * the tty port count */
52781- --port->port.count;
52782+ atomic_dec(&port->port.count);
52783 /* The console is special in terms of closing the device so
52784 * indicate this port is now acting as a system console. */
52785 port->port.console = 1;
52786@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
52787 free_tty:
52788 kfree(tty);
52789 reset_open_count:
52790- port->port.count = 0;
52791+ atomic_set(&port->port.count, 0);
52792 usb_autopm_put_interface(serial->interface);
52793 error_get_interface:
52794 usb_serial_put(serial);
52795@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
52796 static void usb_console_write(struct console *co,
52797 const char *buf, unsigned count)
52798 {
52799- static struct usbcons_info *info = &usbcons_info;
52800+ struct usbcons_info *info = &usbcons_info;
52801 struct usb_serial_port *port = info->port;
52802 struct usb_serial *serial;
52803 int retval = -ENODEV;
52804diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
52805index 75f70f0..d467e1a 100644
52806--- a/drivers/usb/storage/usb.h
52807+++ b/drivers/usb/storage/usb.h
52808@@ -63,7 +63,7 @@ struct us_unusual_dev {
52809 __u8 useProtocol;
52810 __u8 useTransport;
52811 int (*initFunction)(struct us_data *);
52812-};
52813+} __do_const;
52814
52815
52816 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
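__do_const marks structures of function pointers that never change after initialization; the constify plugin rewrites the attribute into a const qualifier so every instance lands in read-only memory. A sketch of the macro plumbing, assuming a CONSTIFY_PLUGIN guard (both the guard and attribute names are assumptions):

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#else
#define __do_const	/* expands to nothing without the plugin */
#endif

struct us_unusual_dev_model {
	int (*init)(void *us);
} __do_const;		/* instances become read-only with the plugin */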
52817diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
52818index a2ef84b..aa7c2b8 100644
52819--- a/drivers/usb/wusbcore/wa-hc.h
52820+++ b/drivers/usb/wusbcore/wa-hc.h
52821@@ -225,7 +225,7 @@ struct wahc {
52822 spinlock_t xfer_list_lock;
52823 struct work_struct xfer_enqueue_work;
52824 struct work_struct xfer_error_work;
52825- atomic_t xfer_id_count;
52826+ atomic_unchecked_t xfer_id_count;
52827
52828 kernel_ulong_t quirks;
52829 };
52830@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa)
52831 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
52832 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
52833 wa->dto_in_use = 0;
52834- atomic_set(&wa->xfer_id_count, 1);
52835+ atomic_set_unchecked(&wa->xfer_id_count, 1);
52836 }
52837
52838 /**
52839diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
52840index 3cd96e9..bd7c58d 100644
52841--- a/drivers/usb/wusbcore/wa-xfer.c
52842+++ b/drivers/usb/wusbcore/wa-xfer.c
52843@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
52844 */
52845 static void wa_xfer_id_init(struct wa_xfer *xfer)
52846 {
52847- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
52848+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
52849 }
52850
52851 /* Return the xfer's ID. */
52852diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
52853index 21271d8..45b55a0 100644
52854--- a/drivers/vfio/vfio.c
52855+++ b/drivers/vfio/vfio.c
52856@@ -487,7 +487,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
52857 return 0;
52858
52859 /* TODO Prevent device auto probing */
52860- WARN("Device %s added to live group %d!\n", dev_name(dev),
52861+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
52862 iommu_group_id(group->iommu_group));
52863
52864 return 0;
52865diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
52866index 5174eba..451e6bc 100644
52867--- a/drivers/vhost/vringh.c
52868+++ b/drivers/vhost/vringh.c
52869@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
52870 /* Userspace access helpers: in this case, addresses are really userspace. */
52871 static inline int getu16_user(u16 *val, const u16 *p)
52872 {
52873- return get_user(*val, (__force u16 __user *)p);
52874+ return get_user(*val, (u16 __force_user *)p);
52875 }
52876
52877 static inline int putu16_user(u16 *p, u16 val)
52878 {
52879- return put_user(val, (__force u16 __user *)p);
52880+ return put_user(val, (u16 __force_user *)p);
52881 }
52882
52883 static inline int copydesc_user(void *dst, const void *src, size_t len)
52884 {
52885- return copy_from_user(dst, (__force void __user *)src, len) ?
52886+ return copy_from_user(dst, (void __force_user *)src, len) ?
52887 -EFAULT : 0;
52888 }
52889
52890@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
52891 const struct vring_used_elem *src,
52892 unsigned int num)
52893 {
52894- return copy_to_user((__force void __user *)dst, src,
52895+ return copy_to_user((void __force_user *)dst, src,
52896 sizeof(*dst) * num) ? -EFAULT : 0;
52897 }
52898
52899 static inline int xfer_from_user(void *src, void *dst, size_t len)
52900 {
52901- return copy_from_user(dst, (__force void __user *)src, len) ?
52902+ return copy_from_user(dst, (void __force_user *)src, len) ?
52903 -EFAULT : 0;
52904 }
52905
52906 static inline int xfer_to_user(void *dst, void *src, size_t len)
52907 {
52908- return copy_to_user((__force void __user *)dst, src, len) ?
52909+ return copy_to_user((void __force_user *)dst, src, len) ?
52910 -EFAULT : 0;
52911 }
52912
52913@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
52914 vrh->last_used_idx = 0;
52915 vrh->vring.num = num;
52916 /* vring expects kernel addresses, but only used via accessors. */
52917- vrh->vring.desc = (__force struct vring_desc *)desc;
52918- vrh->vring.avail = (__force struct vring_avail *)avail;
52919- vrh->vring.used = (__force struct vring_used *)used;
52920+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
52921+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
52922+ vrh->vring.used = (__force_kernel struct vring_used *)used;
52923 return 0;
52924 }
52925 EXPORT_SYMBOL(vringh_init_user);
52926@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
52927
52928 static inline int putu16_kern(u16 *p, u16 val)
52929 {
52930- ACCESS_ONCE(*p) = val;
52931+ ACCESS_ONCE_RW(*p) = val;
52932 return 0;
52933 }
52934
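The vringh.c changes replace open-coded (__force ... __user) casts with __force_user/__force_kernel, which bundle the sparse address-space override into one greppable annotation. A sketch of plausible macro definitions; the exact sparse attributes are an assumption, and all of these expand to nothing in a normal gcc build:

#ifdef __CHECKER__
# define __force        __attribute__((force))
# define __user         __attribute__((noderef, address_space(1)))
# define __force_user   __force __user
# define __force_kernel __force	/* kernel space is the default */
#else
# define __force
# define __user
# define __force_user
# define __force_kernel
#endif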
52935diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
52936index 1b0b233..6f34c2c 100644
52937--- a/drivers/video/arcfb.c
52938+++ b/drivers/video/arcfb.c
52939@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
52940 return -ENOSPC;
52941
52942 err = 0;
52943- if ((count + p) > fbmemlength) {
52944+ if (count > (fbmemlength - p)) {
52945 count = fbmemlength - p;
52946 err = -ENOSPC;
52947 }
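The arcfb hunk rewrites its bounds check so no intermediate value can wrap: (count + p) > fbmemlength overflows when count is near the type's maximum, while count > fbmemlength - p cannot, given the driver has already rejected p > fbmemlength; the usbdev_read hunk earlier reorders length - (*ppos - pos) into length + pos - *ppos in the same spirit. A compilable demonstration of the two forms:

#include <stdbool.h>
#include <stddef.h>

/* Original form: the addition itself can wrap around. */
static bool fits_unsafe(size_t count, size_t p, size_t len)
{
	return (count + p) <= len;
}

/* Rewritten form: assumes p <= len was already checked, so the
 * subtraction cannot underflow and nothing wraps. */
static bool fits_safe(size_t count, size_t p, size_t len)
{
	return count <= len - p;
}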
52948diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
52949index 52108be..c7c110d 100644
52950--- a/drivers/video/aty/aty128fb.c
52951+++ b/drivers/video/aty/aty128fb.c
52952@@ -149,7 +149,7 @@ enum {
52953 };
52954
52955 /* Must match above enum */
52956-static char * const r128_family[] = {
52957+static const char * const r128_family[] = {
52958 "AGP",
52959 "PCI",
52960 "PRO AGP",
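
The aty128fb hunk adds the inner `const` to the family-name table. With only `char * const`, the pointers are immutable but the compiler still permits writing through them into the string literals (which faults at run time, since literals live in read-only memory); `const char * const` lets the compiler reject such a write outright, and fully-const static tables are what this series' constification expects. A short illustration, names hypothetical:

/* the extra const turns a run-time crash into a compile error */
static char * const       half_const[] = { "AGP", "PCI" };
static const char * const full_const[] = { "AGP", "PCI" };

int main(void)
{
	/* half_const[0][0] = 'X';  compiles, then faults in .rodata  */
	/* full_const[0][0] = 'X';  error: assignment of read-only    */
	(void)half_const;
	return full_const[0][0] == 'A' ? 0 : 1;
}
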
52961diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
52962index 28fafbf..ae91651 100644
52963--- a/drivers/video/aty/atyfb_base.c
52964+++ b/drivers/video/aty/atyfb_base.c
52965@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
52966 par->accel_flags = var->accel_flags; /* hack */
52967
52968 if (var->accel_flags) {
52969- info->fbops->fb_sync = atyfb_sync;
52970+ pax_open_kernel();
52971+ *(void **)&info->fbops->fb_sync = atyfb_sync;
52972+ pax_close_kernel();
52973 info->flags &= ~FBINFO_HWACCEL_DISABLED;
52974 } else {
52975- info->fbops->fb_sync = NULL;
52976+ pax_open_kernel();
52977+ *(void **)&info->fbops->fb_sync = NULL;
52978+ pax_close_kernel();
52979 info->flags |= FBINFO_HWACCEL_DISABLED;
52980 }
52981
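
The atyfb hunk shows the patch's standard idiom for structures that PaX makes read-only after boot: `fb_ops` can no longer be assigned through directly, so the driver brackets the one legitimate write with `pax_open_kernel()`/`pax_close_kernel()` (which temporarily lift hardware write protection, e.g. CR0.WP on x86) and casts the member through `void **` to get past the const qualifier. A self-contained sketch of the pattern, with stubs standing in for the real PaX primitives:

/* stubbed pax_open_kernel()/pax_close_kernel() write pattern; in
 * the kernel the ops table is constified, here it is plain data
 * so the demo actually runs */
#include <stdio.h>

struct fb_ops_demo {
	int (*fb_sync)(void);
};

static int demo_sync(void) { return 0; }

static int write_window;
static void pax_open_kernel_stub(void)  { write_window = 1; }
static void pax_close_kernel_stub(void) { write_window = 0; }

static struct fb_ops_demo demo_ops = { .fb_sync = NULL };

int main(void)
{
	pax_open_kernel_stub();
	/* the sanctioned write, cast through void ** as in the patch */
	*(void **)&demo_ops.fb_sync = (void *)demo_sync;
	pax_close_kernel_stub();

	printf("fb_sync installed: %d (window closed again: %d)\n",
	       demo_ops.fb_sync != NULL, !write_window);
	return 0;
}

The same open/write/close sequence recurs in the mach64_cursor and fb_defio hunks below.
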
52982diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
52983index 0fe02e2..ab01b26 100644
52984--- a/drivers/video/aty/mach64_cursor.c
52985+++ b/drivers/video/aty/mach64_cursor.c
52986@@ -8,6 +8,7 @@
52987 #include "../fb_draw.h"
52988
52989 #include <asm/io.h>
52990+#include <asm/pgtable.h>
52991
52992 #ifdef __sparc__
52993 #include <asm/fbio.h>
52994@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
52995 info->sprite.buf_align = 16; /* and 64 lines tall. */
52996 info->sprite.flags = FB_PIXMAP_IO;
52997
52998- info->fbops->fb_cursor = atyfb_cursor;
52999+ pax_open_kernel();
53000+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
53001+ pax_close_kernel();
53002
53003 return 0;
53004 }
53005diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
53006index 84a110a..96312c3 100644
53007--- a/drivers/video/backlight/kb3886_bl.c
53008+++ b/drivers/video/backlight/kb3886_bl.c
53009@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
53010 static unsigned long kb3886bl_flags;
53011 #define KB3886BL_SUSPENDED 0x01
53012
53013-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
53014+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
53015 {
53016 .ident = "Sahara Touch-iT",
53017 .matches = {
53018diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
53019index 900aa4e..6d49418 100644
53020--- a/drivers/video/fb_defio.c
53021+++ b/drivers/video/fb_defio.c
53022@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
53023
53024 BUG_ON(!fbdefio);
53025 mutex_init(&fbdefio->lock);
53026- info->fbops->fb_mmap = fb_deferred_io_mmap;
53027+ pax_open_kernel();
53028+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
53029+ pax_close_kernel();
53030 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
53031 INIT_LIST_HEAD(&fbdefio->pagelist);
53032 if (fbdefio->delay == 0) /* set a default of 1 s */
53033@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
53034 page->mapping = NULL;
53035 }
53036
53037- info->fbops->fb_mmap = NULL;
53038+ *(void **)&info->fbops->fb_mmap = NULL;
53039 mutex_destroy(&fbdefio->lock);
53040 }
53041 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
53042diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
53043index 7309ac7..be3c49c 100644
53044--- a/drivers/video/fbmem.c
53045+++ b/drivers/video/fbmem.c
53046@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
53047 image->dx += image->width + 8;
53048 }
53049 } else if (rotate == FB_ROTATE_UD) {
53050- for (x = 0; x < num && image->dx >= 0; x++) {
53051+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
53052 info->fbops->fb_imageblit(info, image);
53053 image->dx -= image->width + 8;
53054 }
53055@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
53056 image->dy += image->height + 8;
53057 }
53058 } else if (rotate == FB_ROTATE_CCW) {
53059- for (x = 0; x < num && image->dy >= 0; x++) {
53060+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
53061 info->fbops->fb_imageblit(info, image);
53062 image->dy -= image->height + 8;
53063 }
53064@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
53065 return -EFAULT;
53066 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
53067 return -EINVAL;
53068- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
53069+ if (con2fb.framebuffer >= FB_MAX)
53070 return -EINVAL;
53071 if (!registered_fb[con2fb.framebuffer])
53072 request_module("fb%d", con2fb.framebuffer);
53073@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
53074 __u32 data;
53075 int err;
53076
53077- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
53078+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
53079
53080 data = (__u32) (unsigned long) fix->smem_start;
53081 err |= put_user(data, &fix32->smem_start);
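
All three fbmem hunks are type fixes. `fb_image.dx`/`.dy` and `fb_con2fbmap.framebuffer` are `__u32`, so `image->dx >= 0` was always true and `con2fb.framebuffer < 0` always false; the `(__s32)` cast restores the intended "stop once the coordinate underflows" loop condition, and the dead `< 0` test is simply dropped. The last hunk passes the `id` array itself (which decays to `char *`) instead of `&fix32->id` (a pointer-to-array); the same bytes are copied, but the pointer types now line up. A small runnable sketch of the loop fix, values invented:

/* why an unsigned coordinate needs a signed cast to detect underflow */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dx = 20;          /* like fb_image.dx (__u32)     */
	const uint32_t step = 16;  /* image->width + 8 in the code */
	int blits = 0;

	/* plain "dx >= 0" would hold forever; the signed view goes
	 * negative as soon as dx -= step wraps around */
	while ((int32_t)dx >= 0 && blits < 100) {
		blits++;
		dx -= step;
	}
	printf("stopped after %d blits\n", blits);  /* prints 2 */
	return 0;
}
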
53082diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
53083index 130708f..cdac1a9 100644
53084--- a/drivers/video/hyperv_fb.c
53085+++ b/drivers/video/hyperv_fb.c
53086@@ -233,7 +233,7 @@ static uint screen_fb_size;
53087 static inline int synthvid_send(struct hv_device *hdev,
53088 struct synthvid_msg *msg)
53089 {
53090- static atomic64_t request_id = ATOMIC64_INIT(0);
53091+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
53092 int ret;
53093
53094 msg->pipe_hdr.type = PIPE_MSG_DATA;
53095@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
53096
53097 ret = vmbus_sendpacket(hdev->channel, msg,
53098 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
53099- atomic64_inc_return(&request_id),
53100+ atomic64_inc_return_unchecked(&request_id),
53101 VM_PKT_DATA_INBAND, 0);
53102
53103 if (ret)
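
The hyperv_fb counter is a transaction cookie, not a reference count: each packet only needs a distinct id, and wrapping after 2^63 increments is harmless. The overflow-checked atomics in this series would treat that wrap as a bug, so the patch moves the counter to `atomic64_unchecked_t`, whose increment skips the overflow trap. A stub sketch of the two policies (plain structs here; the kernel versions are real atomics with asm-level traps):

/* checked vs. unchecked increment policies, illustrated with stubs */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t v; } atomic64_demo_t;

/* refcount style: signed overflow is a bug, trap on it */
static int64_t inc_return_checked(atomic64_demo_t *a)
{
	assert(a->v != INT64_MAX);  /* the hardened kernel traps here */
	return ++a->v;
}

/* cookie style: wraparound is accepted and well defined */
static int64_t inc_return_unchecked(atomic64_demo_t *a)
{
	a->v = (int64_t)((uint64_t)a->v + 1);  /* wraps silently */
	return a->v;
}

int main(void)
{
	atomic64_demo_t request_id = { 0 };
	printf("id %lld\n", (long long)inc_return_unchecked(&request_id));
	printf("id %lld\n", (long long)inc_return_checked(&request_id));
	return 0;
}
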
53104diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
53105index 7672d2e..b56437f 100644
53106--- a/drivers/video/i810/i810_accel.c
53107+++ b/drivers/video/i810/i810_accel.c
53108@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
53109 }
53110 }
53111 printk("ringbuffer lockup!!!\n");
53112+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
53113 i810_report_error(mmio);
53114 par->dev_flags |= LOCKUP;
53115 info->pixmap.scan_align = 1;
53116diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
53117index 3c14e43..2630570 100644
53118--- a/drivers/video/logo/logo_linux_clut224.ppm
53119+++ b/drivers/video/logo/logo_linux_clut224.ppm
53120@@ -2,1603 +2,1123 @@ P3
53121 # Standard 224-color Linux logo
53122 80 80
53123 255
[raw PPM pixel rows elided: the remainder of this hunk removes the old 80x80, 224-color Linux boot-logo image as "-" lines of RGB triplets and, per the hunk header, adds 1123 lines for the replacement image; it is image data only, with no code content]
54513-253 253 253 253 253 253 253 253 253 250 250 250
54514-242 242 242 210 210 210 144 144 144 66 66 66
54515- 6 6 6 2 2 6 2 2 6 2 2 6
54516- 2 2 6 2 2 6 61 42 6 163 110 8
54517-216 158 10 236 178 12 246 190 14 246 190 14
54518-246 190 14 246 190 14 246 190 14 246 190 14
54519-246 190 14 246 190 14 246 190 14 246 190 14
54520-246 190 14 239 182 13 230 174 11 216 158 10
54521-190 142 34 124 112 88 70 70 70 38 38 38
54522- 18 18 18 6 6 6 0 0 0 0 0 0
54523- 0 0 0 0 0 0 0 0 0 0 0 0
54524- 0 0 0 0 0 0 0 0 0 0 0 0
54525- 0 0 0 0 0 0 6 6 6 22 22 22
54526- 62 62 62 168 124 44 206 145 10 224 166 10
54527-236 178 12 239 182 13 242 186 14 242 186 14
54528-246 186 14 246 190 14 246 190 14 246 190 14
54529-246 190 14 246 190 14 246 190 14 246 190 14
54530-246 190 14 246 190 14 246 190 14 246 190 14
54531-246 190 14 236 178 12 216 158 10 175 118 6
54532- 80 54 7 2 2 6 6 6 6 30 30 30
54533- 54 54 54 62 62 62 50 50 50 38 38 38
54534- 14 14 14 2 2 6 2 2 6 2 2 6
54535- 2 2 6 2 2 6 2 2 6 2 2 6
54536- 2 2 6 6 6 6 80 54 7 167 114 7
54537-213 154 11 236 178 12 246 190 14 246 190 14
54538-246 190 14 246 190 14 246 190 14 246 190 14
54539-246 190 14 242 186 14 239 182 13 239 182 13
54540-230 174 11 210 150 10 174 135 50 124 112 88
54541- 82 82 82 54 54 54 34 34 34 18 18 18
54542- 6 6 6 0 0 0 0 0 0 0 0 0
54543- 0 0 0 0 0 0 0 0 0 0 0 0
54544- 0 0 0 0 0 0 0 0 0 0 0 0
54545- 0 0 0 0 0 0 6 6 6 18 18 18
54546- 50 50 50 158 118 36 192 133 9 200 144 11
54547-216 158 10 219 162 10 224 166 10 226 170 11
54548-230 174 11 236 178 12 239 182 13 239 182 13
54549-242 186 14 246 186 14 246 190 14 246 190 14
54550-246 190 14 246 190 14 246 190 14 246 190 14
54551-246 186 14 230 174 11 210 150 10 163 110 8
54552-104 69 6 10 10 10 2 2 6 2 2 6
54553- 2 2 6 2 2 6 2 2 6 2 2 6
54554- 2 2 6 2 2 6 2 2 6 2 2 6
54555- 2 2 6 2 2 6 2 2 6 2 2 6
54556- 2 2 6 6 6 6 91 60 6 167 114 7
54557-206 145 10 230 174 11 242 186 14 246 190 14
54558-246 190 14 246 190 14 246 186 14 242 186 14
54559-239 182 13 230 174 11 224 166 10 213 154 11
54560-180 133 36 124 112 88 86 86 86 58 58 58
54561- 38 38 38 22 22 22 10 10 10 6 6 6
54562- 0 0 0 0 0 0 0 0 0 0 0 0
54563- 0 0 0 0 0 0 0 0 0 0 0 0
54564- 0 0 0 0 0 0 0 0 0 0 0 0
54565- 0 0 0 0 0 0 0 0 0 14 14 14
54566- 34 34 34 70 70 70 138 110 50 158 118 36
54567-167 114 7 180 123 7 192 133 9 197 138 11
54568-200 144 11 206 145 10 213 154 11 219 162 10
54569-224 166 10 230 174 11 239 182 13 242 186 14
54570-246 186 14 246 186 14 246 186 14 246 186 14
54571-239 182 13 216 158 10 185 133 11 152 99 6
54572-104 69 6 18 14 6 2 2 6 2 2 6
54573- 2 2 6 2 2 6 2 2 6 2 2 6
54574- 2 2 6 2 2 6 2 2 6 2 2 6
54575- 2 2 6 2 2 6 2 2 6 2 2 6
54576- 2 2 6 6 6 6 80 54 7 152 99 6
54577-192 133 9 219 162 10 236 178 12 239 182 13
54578-246 186 14 242 186 14 239 182 13 236 178 12
54579-224 166 10 206 145 10 192 133 9 154 121 60
54580- 94 94 94 62 62 62 42 42 42 22 22 22
54581- 14 14 14 6 6 6 0 0 0 0 0 0
54582- 0 0 0 0 0 0 0 0 0 0 0 0
54583- 0 0 0 0 0 0 0 0 0 0 0 0
54584- 0 0 0 0 0 0 0 0 0 0 0 0
54585- 0 0 0 0 0 0 0 0 0 6 6 6
54586- 18 18 18 34 34 34 58 58 58 78 78 78
54587-101 98 89 124 112 88 142 110 46 156 107 11
54588-163 110 8 167 114 7 175 118 6 180 123 7
54589-185 133 11 197 138 11 210 150 10 219 162 10
54590-226 170 11 236 178 12 236 178 12 234 174 13
54591-219 162 10 197 138 11 163 110 8 130 83 6
54592- 91 60 6 10 10 10 2 2 6 2 2 6
54593- 18 18 18 38 38 38 38 38 38 38 38 38
54594- 38 38 38 38 38 38 38 38 38 38 38 38
54595- 38 38 38 38 38 38 26 26 26 2 2 6
54596- 2 2 6 6 6 6 70 47 6 137 92 6
54597-175 118 6 200 144 11 219 162 10 230 174 11
54598-234 174 13 230 174 11 219 162 10 210 150 10
54599-192 133 9 163 110 8 124 112 88 82 82 82
54600- 50 50 50 30 30 30 14 14 14 6 6 6
54601- 0 0 0 0 0 0 0 0 0 0 0 0
54602- 0 0 0 0 0 0 0 0 0 0 0 0
54603- 0 0 0 0 0 0 0 0 0 0 0 0
54604- 0 0 0 0 0 0 0 0 0 0 0 0
54605- 0 0 0 0 0 0 0 0 0 0 0 0
54606- 6 6 6 14 14 14 22 22 22 34 34 34
54607- 42 42 42 58 58 58 74 74 74 86 86 86
54608-101 98 89 122 102 70 130 98 46 121 87 25
54609-137 92 6 152 99 6 163 110 8 180 123 7
54610-185 133 11 197 138 11 206 145 10 200 144 11
54611-180 123 7 156 107 11 130 83 6 104 69 6
54612- 50 34 6 54 54 54 110 110 110 101 98 89
54613- 86 86 86 82 82 82 78 78 78 78 78 78
54614- 78 78 78 78 78 78 78 78 78 78 78 78
54615- 78 78 78 82 82 82 86 86 86 94 94 94
54616-106 106 106 101 101 101 86 66 34 124 80 6
54617-156 107 11 180 123 7 192 133 9 200 144 11
54618-206 145 10 200 144 11 192 133 9 175 118 6
54619-139 102 15 109 106 95 70 70 70 42 42 42
54620- 22 22 22 10 10 10 0 0 0 0 0 0
54621- 0 0 0 0 0 0 0 0 0 0 0 0
54622- 0 0 0 0 0 0 0 0 0 0 0 0
54623- 0 0 0 0 0 0 0 0 0 0 0 0
54624- 0 0 0 0 0 0 0 0 0 0 0 0
54625- 0 0 0 0 0 0 0 0 0 0 0 0
54626- 0 0 0 0 0 0 6 6 6 10 10 10
54627- 14 14 14 22 22 22 30 30 30 38 38 38
54628- 50 50 50 62 62 62 74 74 74 90 90 90
54629-101 98 89 112 100 78 121 87 25 124 80 6
54630-137 92 6 152 99 6 152 99 6 152 99 6
54631-138 86 6 124 80 6 98 70 6 86 66 30
54632-101 98 89 82 82 82 58 58 58 46 46 46
54633- 38 38 38 34 34 34 34 34 34 34 34 34
54634- 34 34 34 34 34 34 34 34 34 34 34 34
54635- 34 34 34 34 34 34 38 38 38 42 42 42
54636- 54 54 54 82 82 82 94 86 76 91 60 6
54637-134 86 6 156 107 11 167 114 7 175 118 6
54638-175 118 6 167 114 7 152 99 6 121 87 25
54639-101 98 89 62 62 62 34 34 34 18 18 18
54640- 6 6 6 0 0 0 0 0 0 0 0 0
54641- 0 0 0 0 0 0 0 0 0 0 0 0
54642- 0 0 0 0 0 0 0 0 0 0 0 0
54643- 0 0 0 0 0 0 0 0 0 0 0 0
54644- 0 0 0 0 0 0 0 0 0 0 0 0
54645- 0 0 0 0 0 0 0 0 0 0 0 0
54646- 0 0 0 0 0 0 0 0 0 0 0 0
54647- 0 0 0 6 6 6 6 6 6 10 10 10
54648- 18 18 18 22 22 22 30 30 30 42 42 42
54649- 50 50 50 66 66 66 86 86 86 101 98 89
54650-106 86 58 98 70 6 104 69 6 104 69 6
54651-104 69 6 91 60 6 82 62 34 90 90 90
54652- 62 62 62 38 38 38 22 22 22 14 14 14
54653- 10 10 10 10 10 10 10 10 10 10 10 10
54654- 10 10 10 10 10 10 6 6 6 10 10 10
54655- 10 10 10 10 10 10 10 10 10 14 14 14
54656- 22 22 22 42 42 42 70 70 70 89 81 66
54657- 80 54 7 104 69 6 124 80 6 137 92 6
54658-134 86 6 116 81 8 100 82 52 86 86 86
54659- 58 58 58 30 30 30 14 14 14 6 6 6
54660- 0 0 0 0 0 0 0 0 0 0 0 0
54661- 0 0 0 0 0 0 0 0 0 0 0 0
54662- 0 0 0 0 0 0 0 0 0 0 0 0
54663- 0 0 0 0 0 0 0 0 0 0 0 0
54664- 0 0 0 0 0 0 0 0 0 0 0 0
54665- 0 0 0 0 0 0 0 0 0 0 0 0
54666- 0 0 0 0 0 0 0 0 0 0 0 0
54667- 0 0 0 0 0 0 0 0 0 0 0 0
54668- 0 0 0 6 6 6 10 10 10 14 14 14
54669- 18 18 18 26 26 26 38 38 38 54 54 54
54670- 70 70 70 86 86 86 94 86 76 89 81 66
54671- 89 81 66 86 86 86 74 74 74 50 50 50
54672- 30 30 30 14 14 14 6 6 6 0 0 0
54673- 0 0 0 0 0 0 0 0 0 0 0 0
54674- 0 0 0 0 0 0 0 0 0 0 0 0
54675- 0 0 0 0 0 0 0 0 0 0 0 0
54676- 6 6 6 18 18 18 34 34 34 58 58 58
54677- 82 82 82 89 81 66 89 81 66 89 81 66
54678- 94 86 66 94 86 76 74 74 74 50 50 50
54679- 26 26 26 14 14 14 6 6 6 0 0 0
54680- 0 0 0 0 0 0 0 0 0 0 0 0
54681- 0 0 0 0 0 0 0 0 0 0 0 0
54682- 0 0 0 0 0 0 0 0 0 0 0 0
54683- 0 0 0 0 0 0 0 0 0 0 0 0
54684- 0 0 0 0 0 0 0 0 0 0 0 0
54685- 0 0 0 0 0 0 0 0 0 0 0 0
54686- 0 0 0 0 0 0 0 0 0 0 0 0
54687- 0 0 0 0 0 0 0 0 0 0 0 0
54688- 0 0 0 0 0 0 0 0 0 0 0 0
54689- 6 6 6 6 6 6 14 14 14 18 18 18
54690- 30 30 30 38 38 38 46 46 46 54 54 54
54691- 50 50 50 42 42 42 30 30 30 18 18 18
54692- 10 10 10 0 0 0 0 0 0 0 0 0
54693- 0 0 0 0 0 0 0 0 0 0 0 0
54694- 0 0 0 0 0 0 0 0 0 0 0 0
54695- 0 0 0 0 0 0 0 0 0 0 0 0
54696- 0 0 0 6 6 6 14 14 14 26 26 26
54697- 38 38 38 50 50 50 58 58 58 58 58 58
54698- 54 54 54 42 42 42 30 30 30 18 18 18
54699- 10 10 10 0 0 0 0 0 0 0 0 0
54700- 0 0 0 0 0 0 0 0 0 0 0 0
54701- 0 0 0 0 0 0 0 0 0 0 0 0
54702- 0 0 0 0 0 0 0 0 0 0 0 0
54703- 0 0 0 0 0 0 0 0 0 0 0 0
54704- 0 0 0 0 0 0 0 0 0 0 0 0
54705- 0 0 0 0 0 0 0 0 0 0 0 0
54706- 0 0 0 0 0 0 0 0 0 0 0 0
54707- 0 0 0 0 0 0 0 0 0 0 0 0
54708- 0 0 0 0 0 0 0 0 0 0 0 0
54709- 0 0 0 0 0 0 0 0 0 6 6 6
54710- 6 6 6 10 10 10 14 14 14 18 18 18
54711- 18 18 18 14 14 14 10 10 10 6 6 6
54712- 0 0 0 0 0 0 0 0 0 0 0 0
54713- 0 0 0 0 0 0 0 0 0 0 0 0
54714- 0 0 0 0 0 0 0 0 0 0 0 0
54715- 0 0 0 0 0 0 0 0 0 0 0 0
54716- 0 0 0 0 0 0 0 0 0 6 6 6
54717- 14 14 14 18 18 18 22 22 22 22 22 22
54718- 18 18 18 14 14 14 10 10 10 6 6 6
54719- 0 0 0 0 0 0 0 0 0 0 0 0
54720- 0 0 0 0 0 0 0 0 0 0 0 0
54721- 0 0 0 0 0 0 0 0 0 0 0 0
54722- 0 0 0 0 0 0 0 0 0 0 0 0
54723- 0 0 0 0 0 0 0 0 0 0 0 0
54724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54737+4 4 4 4 4 4
54738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54751+4 4 4 4 4 4
54752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54765+4 4 4 4 4 4
54766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54779+4 4 4 4 4 4
54780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54793+4 4 4 4 4 4
54794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54807+4 4 4 4 4 4
54808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54812+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
54813+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
54814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54817+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
54818+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54819+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
54820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54821+4 4 4 4 4 4
54822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54826+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
54827+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
54828+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54831+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
54832+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
54833+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
54834+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54835+4 4 4 4 4 4
54836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54840+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
54841+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
54842+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54845+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
54846+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
54847+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
54848+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
54849+4 4 4 4 4 4
54850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54853+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
54854+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
54855+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
54856+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
54857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54858+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54859+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
54860+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
54861+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
54862+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
54863+4 4 4 4 4 4
54864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54867+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
54868+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
54869+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
54870+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
54871+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54872+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
54873+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
54874+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
54875+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
54876+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
54877+4 4 4 4 4 4
54878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54881+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
54882+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
54883+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
54884+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
54885+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
54886+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
54887+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
54888+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
54889+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
54890+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
54891+4 4 4 4 4 4
54892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54894+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
54895+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
54896+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
54897+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
54898+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
54899+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
54900+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
54901+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
54902+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
54903+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
54904+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
54905+4 4 4 4 4 4
54906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54908+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
54909+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
54910+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
54911+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
54912+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
54913+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
54914+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
54915+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
54916+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
54917+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
54918+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
54919+4 4 4 4 4 4
54920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54922+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
54923+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
54924+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
54925+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
54926+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
54927+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
54928+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
54929+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
54930+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
54931+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
54932+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54933+4 4 4 4 4 4
54934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54936+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
54937+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
54938+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
54939+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
54940+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
54941+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
54942+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
54943+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
54944+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
54945+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
54946+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
54947+4 4 4 4 4 4
54948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54949+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
54950+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
54951+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
54952+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
54953+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
54954+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
54955+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
54956+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
54957+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
54958+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
54959+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
54960+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
54961+4 4 4 4 4 4
54962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54963+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
54964+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
54965+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
54966+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
54967+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
54968+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
54969+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
54970+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
54971+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
54972+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
54973+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
54974+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
54975+0 0 0 4 4 4
54976+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54977+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
54978+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
54979+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
54980+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
54981+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
54982+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
54983+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
54984+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
54985+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
54986+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
54987+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
54988+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
54989+2 0 0 0 0 0
54990+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
54991+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
54992+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
54993+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
54994+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
54995+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
54996+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
54997+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
54998+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
54999+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
55000+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
55001+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
55002+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
55003+37 38 37 0 0 0
55004+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
55005+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
55006+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
55007+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
55008+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
55009+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
55010+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
55011+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
55012+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
55013+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
55014+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
55015+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
55016+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
55017+85 115 134 4 0 0
55018+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
55019+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
55020+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
55021+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
55022+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
55023+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
55024+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
55025+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
55026+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
55027+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
55028+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
55029+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
55030+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
55031+60 73 81 4 0 0
55032+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
55033+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
55034+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
55035+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
55036+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
55037+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
55038+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
55039+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
55040+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
55041+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
55042+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
55043+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
55044+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
55045+16 19 21 4 0 0
55046+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
55047+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
55048+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
55049+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
55050+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
55051+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
55052+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
55053+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
55054+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
55055+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
55056+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
55057+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
55058+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
55059+4 0 0 4 3 3
55060+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
55061+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
55062+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
55063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
55064+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
55065+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
55066+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
55067+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
55068+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
55069+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
55070+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
55071+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
55072+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
55073+3 2 2 4 4 4
55074+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
55075+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
55076+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
55077+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
55078+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
55079+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
55080+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
55081+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
55082+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
55083+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
55084+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
55085+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
55086+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
55087+4 4 4 4 4 4
55088+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
55089+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
55090+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
55091+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
55092+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
55093+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
55094+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
55095+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
55096+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
55097+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
55098+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
55099+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
55100+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
55101+4 4 4 4 4 4
55102+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
55103+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
55104+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
55105+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
55106+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
55107+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
55108+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
55109+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
55110+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
55111+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
55112+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
55113+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
55114+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
55115+5 5 5 5 5 5
55116+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
55117+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
55118+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
55119+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
55120+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
55121+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
55122+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
55123+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
55124+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
55125+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
55126+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
55127+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
55128+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
55129+5 5 5 4 4 4
55130+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
55131+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
55132+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
55133+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
55134+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
55135+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
55136+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
55137+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
55138+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
55139+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
55140+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
55141+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
55142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55143+4 4 4 4 4 4
55144+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
55145+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
55146+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
55147+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
55148+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
55149+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
55150+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
55151+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
55152+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
55153+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
55154+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
55155+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
55156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55157+4 4 4 4 4 4
55158+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
55159+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
55160+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
55161+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
55162+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
55163+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
55164+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
55165+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
55166+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
55167+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
55168+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
55169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55171+4 4 4 4 4 4
55172+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
55173+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
55174+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
55175+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
55176+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
55177+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
55178+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
55179+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
55180+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
55181+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
55182+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
55183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55185+4 4 4 4 4 4
55186+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
55187+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
55188+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
55189+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
55190+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
55191+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
55192+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
55193+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
55194+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
55195+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
55196+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55199+4 4 4 4 4 4
55200+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
55201+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
55202+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
55203+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
55204+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
55205+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
55206+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
55207+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
55208+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
55209+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
55210+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
55211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55213+4 4 4 4 4 4
55214+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
55215+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
55216+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
55217+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
55218+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
55219+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
55220+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
55221+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
55222+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
55223+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
55224+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
55225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55227+4 4 4 4 4 4
55228+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
55229+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
55230+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
55231+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
55232+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
55233+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
55234+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
55235+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
55236+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
55237+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
55238+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55241+4 4 4 4 4 4
55242+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
55243+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
55244+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
55245+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
55246+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55247+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
55248+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
55249+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
55250+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
55251+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
55252+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55255+4 4 4 4 4 4
55256+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
55257+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
55258+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
55259+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
55260+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55261+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
55262+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
55263+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
55264+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
55265+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
55266+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55269+4 4 4 4 4 4
55270+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
55271+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
55272+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
55273+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
55274+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55275+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
55276+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
55277+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
55278+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
55279+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55280+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55283+4 4 4 4 4 4
55284+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
55285+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
55286+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
55287+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
55288+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
55289+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
55290+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
55291+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
55292+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
55293+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55294+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55297+4 4 4 4 4 4
55298+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
55299+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
55300+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
55301+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
55302+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55303+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
55304+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
55305+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
55306+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
55307+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55308+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55311+4 4 4 4 4 4
55312+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
55313+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
55314+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
55315+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
55316+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
55317+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
55318+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
55319+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
55320+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
55321+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55322+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55325+4 4 4 4 4 4
55326+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
55327+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
55328+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
55329+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
55330+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
55331+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
55332+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
55333+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
55334+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
55335+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55336+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55339+4 4 4 4 4 4
55340+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
55341+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
55342+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
55343+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
55344+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
55345+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
55346+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
55347+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
55348+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
55349+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55350+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55353+4 4 4 4 4 4
55354+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
55355+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
55356+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
55357+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
55358+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
55359+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
55360+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
55361+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
55362+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
55363+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55364+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55367+4 4 4 4 4 4
55368+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
55369+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
55370+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
55371+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
55372+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
55373+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
55374+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
55375+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
55376+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
55377+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55378+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55381+4 4 4 4 4 4
55382+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
55383+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
55384+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
55385+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
55386+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
55387+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
55388+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
55389+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
55390+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
55391+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55392+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55395+4 4 4 4 4 4
55396+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
55397+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
55398+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
55399+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
55400+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
55401+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
55402+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55403+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
55404+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
55405+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55406+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55409+4 4 4 4 4 4
55410+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
55411+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
55412+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
55413+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
55414+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
55415+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
55416+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
55417+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
55418+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
55419+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55420+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55423+4 4 4 4 4 4
55424+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
55425+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
55426+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
55427+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
55428+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
55429+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
55430+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
55431+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
55432+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
55433+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55434+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55437+4 4 4 4 4 4
55438+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
55439+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
55440+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
55441+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
55442+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
55443+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
55444+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
55445+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
55446+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
55447+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55448+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55451+4 4 4 4 4 4
55452+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
55453+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
55454+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
55455+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
55456+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
55457+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
55458+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
55459+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
55460+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
55461+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55462+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55465+4 4 4 4 4 4
55466+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
55467+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
55468+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
55469+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
55470+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
55471+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
55472+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
55473+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
55474+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
55475+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
55476+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55479+4 4 4 4 4 4
55480+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
55481+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
55482+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
55483+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
55484+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
55485+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
55486+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
55487+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
55488+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
55489+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
55490+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
55491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
55493+4 4 4 4 4 4
55494+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
55495+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
55496+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
55497+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
55498+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
55499+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
55500+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
55501+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
[342 lines of raw RGB pixel triples omitted here: the remainder of an image-data hunk (added lines of the form "R G B R G B ...", apparently an embedded logo/palette image) containing no reviewable code content]
55844diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
55845index a01147f..5d896f8 100644
55846--- a/drivers/video/matrox/matroxfb_DAC1064.c
55847+++ b/drivers/video/matrox/matroxfb_DAC1064.c
55848@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55849
55850 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55851 struct matrox_switch matrox_mystique = {
55852- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55853+ .preinit = MGA1064_preinit,
55854+ .reset = MGA1064_reset,
55855+ .init = MGA1064_init,
55856+ .restore = MGA1064_restore,
55857 };
55858 EXPORT_SYMBOL(matrox_mystique);
55859 #endif
55860
55861 #ifdef CONFIG_FB_MATROX_G
55862 struct matrox_switch matrox_G100 = {
55863- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55864+ .preinit = MGAG100_preinit,
55865+ .reset = MGAG100_reset,
55866+ .init = MGAG100_init,
55867+ .restore = MGAG100_restore,
55868 };
55869 EXPORT_SYMBOL(matrox_G100);
55870 #endif
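This hunk (and the Ti3026 and sh_mobile_lcdc hunks below) converts positional struct initializers to C99 designated initializers. The positional form depends on field order, which stops being stable once a plugin such as grsecurity's RANDSTRUCT shuffles structure layouts; the designated form binds each value to its field by name. A minimal sketch of the difference, using an illustrative ops struct that is not from the patch:

    /* Why designated initializers survive field reordering.
     * The struct and functions are illustrative only. */
    #include <stdio.h>

    struct ops {
        int  (*preinit)(void);
        void (*reset)(void);
    };

    static int  my_preinit(void) { return 0; }
    static void my_reset(void)   { }

    /* Positional: breaks (or mis-wires handlers) if the fields of
     * struct ops are ever reordered or a new one is inserted. */
    static struct ops positional = { my_preinit, my_reset };

    /* Designated: a layout-randomizing plugin can shuffle the struct
     * without invalidating the initializer. */
    static struct ops designated = {
        .preinit = my_preinit,
        .reset   = my_reset,
    };

    int main(void)
    {
        printf("%d %d\n", positional.preinit(), designated.preinit());
        return 0;
    }

With distinct pointer types, as here, a reordered positional initializer at least fails to compile; with compatible types it would keep compiling and silently wire the wrong handlers.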
55871diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
55872index 195ad7c..09743fc 100644
55873--- a/drivers/video/matrox/matroxfb_Ti3026.c
55874+++ b/drivers/video/matrox/matroxfb_Ti3026.c
55875@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55876 }
55877
55878 struct matrox_switch matrox_millennium = {
55879- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55880+ .preinit = Ti3026_preinit,
55881+ .reset = Ti3026_reset,
55882+ .init = Ti3026_init,
55883+ .restore = Ti3026_restore
55884 };
55885 EXPORT_SYMBOL(matrox_millennium);
55886 #endif
55887diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
55888index fe92eed..106e085 100644
55889--- a/drivers/video/mb862xx/mb862xxfb_accel.c
55890+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
55891@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55892 struct mb862xxfb_par *par = info->par;
55893
55894 if (info->var.bits_per_pixel == 32) {
55895- info->fbops->fb_fillrect = cfb_fillrect;
55896- info->fbops->fb_copyarea = cfb_copyarea;
55897- info->fbops->fb_imageblit = cfb_imageblit;
55898+ pax_open_kernel();
55899+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55900+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55901+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55902+ pax_close_kernel();
55903 } else {
55904 outreg(disp, GC_L0EM, 3);
55905- info->fbops->fb_fillrect = mb86290fb_fillrect;
55906- info->fbops->fb_copyarea = mb86290fb_copyarea;
55907- info->fbops->fb_imageblit = mb86290fb_imageblit;
55908+ pax_open_kernel();
55909+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55910+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55911+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55912+ pax_close_kernel();
55913 }
55914 outreg(draw, GDC_REG_DRAW_BASE, 0);
55915 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
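The mb862xx hunk shows the store pattern used throughout these framebuffer drivers: with the constify plugin and KERNEXEC, fb_ops tables live in read-only memory, so the one sanctioned write goes through a *(void **)& cast (to get past the const qualifier) and is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A rough userspace analogue, with POSIX mprotect() standing in for the kernel-side mechanism:

    /* Userspace analogue of pax_open_kernel()/pax_close_kernel():
     * an ops table in a read-only page is made briefly writable to
     * patch one pointer.  Illustrative only. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct fb_ops_like { void (*fb_fillrect)(void); };

    static void sw_fillrect(void) { puts("software fillrect"); }
    static void hw_fillrect(void) { puts("accelerated fillrect"); }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        struct fb_ops_like *ops = mmap(NULL, pagesz,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        ops->fb_fillrect = sw_fillrect;

        /* "Constify": from here on the table is read-only. */
        mprotect(ops, pagesz, PROT_READ);

        /* open ... */
        mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
        ops->fb_fillrect = hw_fillrect;   /* the sanctioned write */
        /* ... close. */
        mprotect(ops, pagesz, PROT_READ);

        ops->fb_fillrect();               /* "accelerated fillrect" */
        return 0;
    }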
55916diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
55917index def0412..fed6529 100644
55918--- a/drivers/video/nvidia/nvidia.c
55919+++ b/drivers/video/nvidia/nvidia.c
55920@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55921 info->fix.line_length = (info->var.xres_virtual *
55922 info->var.bits_per_pixel) >> 3;
55923 if (info->var.accel_flags) {
55924- info->fbops->fb_imageblit = nvidiafb_imageblit;
55925- info->fbops->fb_fillrect = nvidiafb_fillrect;
55926- info->fbops->fb_copyarea = nvidiafb_copyarea;
55927- info->fbops->fb_sync = nvidiafb_sync;
55928+ pax_open_kernel();
55929+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55930+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55931+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55932+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55933+ pax_close_kernel();
55934 info->pixmap.scan_align = 4;
55935 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55936 info->flags |= FBINFO_READS_FAST;
55937 NVResetGraphics(info);
55938 } else {
55939- info->fbops->fb_imageblit = cfb_imageblit;
55940- info->fbops->fb_fillrect = cfb_fillrect;
55941- info->fbops->fb_copyarea = cfb_copyarea;
55942- info->fbops->fb_sync = NULL;
55943+ pax_open_kernel();
55944+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55945+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55946+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55947+ *(void **)&info->fbops->fb_sync = NULL;
55948+ pax_close_kernel();
55949 info->pixmap.scan_align = 1;
55950 info->flags |= FBINFO_HWACCEL_DISABLED;
55951 info->flags &= ~FBINFO_READS_FAST;
55952@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55953 info->pixmap.size = 8 * 1024;
55954 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55955
55956- if (!hwcur)
55957- info->fbops->fb_cursor = NULL;
55958+ if (!hwcur) {
55959+ pax_open_kernel();
55960+ *(void **)&info->fbops->fb_cursor = NULL;
55961+ pax_close_kernel();
55962+ }
55963
55964 info->var.accel_flags = (!noaccel);
55965
55966diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
55967index 669a81f..e216d76 100644
55968--- a/drivers/video/omap2/dss/display.c
55969+++ b/drivers/video/omap2/dss/display.c
55970@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55971 snprintf(dssdev->alias, sizeof(dssdev->alias),
55972 "display%d", disp_num_counter++);
55973
55974+ pax_open_kernel();
55975 if (drv && drv->get_resolution == NULL)
55976- drv->get_resolution = omapdss_default_get_resolution;
55977+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55978 if (drv && drv->get_recommended_bpp == NULL)
55979- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55980+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55981 if (drv && drv->get_timings == NULL)
55982- drv->get_timings = omapdss_default_get_timings;
55983+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55984+ pax_close_kernel();
55985
55986 mutex_lock(&panel_list_mutex);
55987 list_add_tail(&dssdev->panel_list, &panel_list);
55988diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
55989index 83433cb..71e9b98 100644
55990--- a/drivers/video/s1d13xxxfb.c
55991+++ b/drivers/video/s1d13xxxfb.c
55992@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55993
55994 switch(prod_id) {
55995 case S1D13506_PROD_ID: /* activate acceleration */
55996- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55997- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55998+ pax_open_kernel();
55999+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56000+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56001+ pax_close_kernel();
56002 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56003 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56004 break;
56005diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
56006index 2bcc84a..29dd1ea 100644
56007--- a/drivers/video/sh_mobile_lcdcfb.c
56008+++ b/drivers/video/sh_mobile_lcdcfb.c
56009@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56010 }
56011
56012 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56013- lcdc_sys_write_index,
56014- lcdc_sys_write_data,
56015- lcdc_sys_read_data,
56016+ .write_index = lcdc_sys_write_index,
56017+ .write_data = lcdc_sys_write_data,
56018+ .read_data = lcdc_sys_read_data,
56019 };
56020
56021 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56022diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
56023index d513ed6..90b0de9 100644
56024--- a/drivers/video/smscufx.c
56025+++ b/drivers/video/smscufx.c
56026@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56027 fb_deferred_io_cleanup(info);
56028 kfree(info->fbdefio);
56029 info->fbdefio = NULL;
56030- info->fbops->fb_mmap = ufx_ops_mmap;
56031+ pax_open_kernel();
56032+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56033+ pax_close_kernel();
56034 }
56035
56036 pr_debug("released /dev/fb%d user=%d count=%d",
56037diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
56038index 77b890e..458e666 100644
56039--- a/drivers/video/udlfb.c
56040+++ b/drivers/video/udlfb.c
56041@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56042 dlfb_urb_completion(urb);
56043
56044 error:
56045- atomic_add(bytes_sent, &dev->bytes_sent);
56046- atomic_add(bytes_identical, &dev->bytes_identical);
56047- atomic_add(width*height*2, &dev->bytes_rendered);
56048+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56049+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56050+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56051 end_cycles = get_cycles();
56052- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56053+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56054 >> 10)), /* Kcycles */
56055 &dev->cpu_kcycles_used);
56056
56057@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56058 dlfb_urb_completion(urb);
56059
56060 error:
56061- atomic_add(bytes_sent, &dev->bytes_sent);
56062- atomic_add(bytes_identical, &dev->bytes_identical);
56063- atomic_add(bytes_rendered, &dev->bytes_rendered);
56064+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56065+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56066+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56067 end_cycles = get_cycles();
56068- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56069+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56070 >> 10)), /* Kcycles */
56071 &dev->cpu_kcycles_used);
56072 }
56073@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56074 fb_deferred_io_cleanup(info);
56075 kfree(info->fbdefio);
56076 info->fbdefio = NULL;
56077- info->fbops->fb_mmap = dlfb_ops_mmap;
56078+ pax_open_kernel();
56079+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56080+ pax_close_kernel();
56081 }
56082
56083 pr_warn("released /dev/fb%d user=%d count=%d\n",
56084@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56085 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56086 struct dlfb_data *dev = fb_info->par;
56087 return snprintf(buf, PAGE_SIZE, "%u\n",
56088- atomic_read(&dev->bytes_rendered));
56089+ atomic_read_unchecked(&dev->bytes_rendered));
56090 }
56091
56092 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56093@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56094 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56095 struct dlfb_data *dev = fb_info->par;
56096 return snprintf(buf, PAGE_SIZE, "%u\n",
56097- atomic_read(&dev->bytes_identical));
56098+ atomic_read_unchecked(&dev->bytes_identical));
56099 }
56100
56101 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56102@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56103 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56104 struct dlfb_data *dev = fb_info->par;
56105 return snprintf(buf, PAGE_SIZE, "%u\n",
56106- atomic_read(&dev->bytes_sent));
56107+ atomic_read_unchecked(&dev->bytes_sent));
56108 }
56109
56110 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56111@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56112 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56113 struct dlfb_data *dev = fb_info->par;
56114 return snprintf(buf, PAGE_SIZE, "%u\n",
56115- atomic_read(&dev->cpu_kcycles_used));
56116+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56117 }
56118
56119 static ssize_t edid_show(
56120@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56121 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56122 struct dlfb_data *dev = fb_info->par;
56123
56124- atomic_set(&dev->bytes_rendered, 0);
56125- atomic_set(&dev->bytes_identical, 0);
56126- atomic_set(&dev->bytes_sent, 0);
56127- atomic_set(&dev->cpu_kcycles_used, 0);
56128+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56129+ atomic_set_unchecked(&dev->bytes_identical, 0);
56130+ atomic_set_unchecked(&dev->bytes_sent, 0);
56131+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56132
56133 return count;
56134 }
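The udlfb counters above switch from atomic_t to atomic_unchecked_t. Under the PaX REFCOUNT feature, the ordinary atomic operations trap on signed overflow to catch reference-count bugs; counters that may legitimately wrap, such as these byte and cycle totals, must use the unchecked variants. A userspace sketch of that split (the helpers are illustrative and, unlike the real ones, not actually atomic):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { volatile int counter; } atomic_like_t;

    /* Checked form: trap on signed overflow, as REFCOUNT would. */
    static void atomic_like_add(int i, atomic_like_t *v)
    {
        int sum;
        if (__builtin_add_overflow(v->counter, i, &sum)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();
        }
        v->counter = sum;
    }

    /* Unchecked form: wraps modulo 2^32, fine for statistics. */
    static void atomic_like_add_unchecked(int i, atomic_like_t *v)
    {
        v->counter = (int)((unsigned)v->counter + (unsigned)i);
    }

    int main(void)
    {
        atomic_like_t stats = { INT_MAX };
        atomic_like_add_unchecked(1, &stats);  /* wraps, keeps going */
        printf("stats wrapped to %d\n", stats.counter);

        atomic_like_t ref = { INT_MAX };
        atomic_like_add(1, &ref);              /* aborts here */
        return 0;
    }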
56135diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
56136index 256fba7..6e75516 100644
56137--- a/drivers/video/uvesafb.c
56138+++ b/drivers/video/uvesafb.c
56139@@ -19,6 +19,7 @@
56140 #include <linux/io.h>
56141 #include <linux/mutex.h>
56142 #include <linux/slab.h>
56143+#include <linux/moduleloader.h>
56144 #include <video/edid.h>
56145 #include <video/uvesafb.h>
56146 #ifdef CONFIG_X86
56147@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56148 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56149 par->pmi_setpal = par->ypan = 0;
56150 } else {
56151+
56152+#ifdef CONFIG_PAX_KERNEXEC
56153+#ifdef CONFIG_MODULES
56154+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56155+#endif
56156+ if (!par->pmi_code) {
56157+ par->pmi_setpal = par->ypan = 0;
56158+ return 0;
56159+ }
56160+#endif
56161+
56162 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56163 + task->t.regs.edi);
56164+
56165+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56166+ pax_open_kernel();
56167+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56168+ pax_close_kernel();
56169+
56170+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56171+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56172+#else
56173 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56174 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56175+#endif
56176+
56177 printk(KERN_INFO "uvesafb: protected mode interface info at "
56178 "%04x:%04x\n",
56179 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56180@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56181 par->ypan = ypan;
56182
56183 if (par->pmi_setpal || par->ypan) {
56184+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56185 if (__supported_pte_mask & _PAGE_NX) {
56186 par->pmi_setpal = par->ypan = 0;
56187 printk(KERN_WARNING "uvesafb: NX protection is active, "
56188 "better not use the PMI.\n");
56189- } else {
56190+ } else
56191+#endif
56192 uvesafb_vbe_getpmi(task, par);
56193- }
56194 }
56195 #else
56196 /* The protected mode interface is not available on non-x86. */
56197@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56198 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56199
56200 /* Disable blanking if the user requested so. */
56201- if (!blank)
56202- info->fbops->fb_blank = NULL;
56203+ if (!blank) {
56204+ pax_open_kernel();
56205+ *(void **)&info->fbops->fb_blank = NULL;
56206+ pax_close_kernel();
56207+ }
56208
56209 /*
56210 * Find out how much IO memory is required for the mode with
56211@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56212 info->flags = FBINFO_FLAG_DEFAULT |
56213 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56214
56215- if (!par->ypan)
56216- info->fbops->fb_pan_display = NULL;
56217+ if (!par->ypan) {
56218+ pax_open_kernel();
56219+ *(void **)&info->fbops->fb_pan_display = NULL;
56220+ pax_close_kernel();
56221+ }
56222 }
56223
56224 static void uvesafb_init_mtrr(struct fb_info *info)
56225@@ -1792,6 +1822,11 @@ out_mode:
56226 out:
56227 kfree(par->vbe_modes);
56228
56229+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56230+ if (par->pmi_code)
56231+ module_free_exec(NULL, par->pmi_code);
56232+#endif
56233+
56234 framebuffer_release(info);
56235 return err;
56236 }
56237@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev)
56238 kfree(par->vbe_modes);
56239 kfree(par->vbe_state_orig);
56240 kfree(par->vbe_state_saved);
56241+
56242+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56243+ if (par->pmi_code)
56244+ module_free_exec(NULL, par->pmi_code);
56245+#endif
56246+
56247 }
56248
56249 framebuffer_release(info);
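The uvesafb changes copy the BIOS protected-mode interface (PMI) code into memory obtained from module_alloc_exec(), since under KERNEXEC the writable region it used to run from is no longer executable. A userspace analogue of the copy-then-execute step, assuming x86-64 Linux (the byte string encodes mov eax, 42; ret):

    /* Copying code into executable memory under W^X discipline:
     * never writable and executable at the same time. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        static const unsigned char code[] = {
            0xb8, 0x2a, 0x00, 0x00, 0x00,  /* mov eax, 42 */
            0xc3                           /* ret */
        };
        long pagesz = sysconf(_SC_PAGESIZE);

        /* Stage 1: writable while the code is copied in. */
        void *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;
        memcpy(buf, code, sizeof(code));

        /* Stage 2: executable, no longer writable. */
        if (mprotect(buf, pagesz, PROT_READ | PROT_EXEC) != 0)
            return 1;

        int (*fn)(void) = (int (*)(void))buf;
        printf("returned %d\n", fn());     /* returned 42 */
        return 0;
    }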
56250diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
56251index 1c7da3b..56ea0bd 100644
56252--- a/drivers/video/vesafb.c
56253+++ b/drivers/video/vesafb.c
56254@@ -9,6 +9,7 @@
56255 */
56256
56257 #include <linux/module.h>
56258+#include <linux/moduleloader.h>
56259 #include <linux/kernel.h>
56260 #include <linux/errno.h>
56261 #include <linux/string.h>
56262@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56263 static int vram_total; /* Set total amount of memory */
56264 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56265 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56266-static void (*pmi_start)(void) __read_mostly;
56267-static void (*pmi_pal) (void) __read_mostly;
56268+static void (*pmi_start)(void) __read_only;
56269+static void (*pmi_pal) (void) __read_only;
56270 static int depth __read_mostly;
56271 static int vga_compat __read_mostly;
56272 /* --------------------------------------------------------------------- */
56273@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
56274 unsigned int size_remap;
56275 unsigned int size_total;
56276 char *option = NULL;
56277+ void *pmi_code = NULL;
56278
56279 /* ignore error return of fb_get_options */
56280 fb_get_options("vesafb", &option);
56281@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
56282 size_remap = size_total;
56283 vesafb_fix.smem_len = size_remap;
56284
56285-#ifndef __i386__
56286- screen_info.vesapm_seg = 0;
56287-#endif
56288-
56289 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56290 printk(KERN_WARNING
56291 "vesafb: cannot reserve video memory at 0x%lx\n",
56292@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56293 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56294 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56295
56296+#ifdef __i386__
56297+
56298+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56299+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56300+ if (!pmi_code)
56301+#elif !defined(CONFIG_PAX_KERNEXEC)
56302+ if (0)
56303+#endif
56304+
56305+#endif
56306+ screen_info.vesapm_seg = 0;
56307+
56308 if (screen_info.vesapm_seg) {
56309- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56310- screen_info.vesapm_seg,screen_info.vesapm_off);
56311+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56312+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56313 }
56314
56315 if (screen_info.vesapm_seg < 0xc000)
56316@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56317
56318 if (ypan || pmi_setpal) {
56319 unsigned short *pmi_base;
56320+
56321 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56322- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56323- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56324+
56325+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56326+ pax_open_kernel();
56327+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56328+#else
56329+ pmi_code = pmi_base;
56330+#endif
56331+
56332+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56333+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56334+
56335+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56336+ pmi_start = ktva_ktla(pmi_start);
56337+ pmi_pal = ktva_ktla(pmi_pal);
56338+ pax_close_kernel();
56339+#endif
56340+
56341 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56342 if (pmi_base[3]) {
56343 printk(KERN_INFO "vesafb: pmi: ports = ");
56344@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56345 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56346 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56347
56348- if (!ypan)
56349- info->fbops->fb_pan_display = NULL;
56350+ if (!ypan) {
56351+ pax_open_kernel();
56352+ *(void **)&info->fbops->fb_pan_display = NULL;
56353+ pax_close_kernel();
56354+ }
56355
56356 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56357 err = -ENOMEM;
56358@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56359 fb_info(info, "%s frame buffer device\n", info->fix.id);
56360 return 0;
56361 err:
56362+
56363+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56364+ module_free_exec(NULL, pmi_code);
56365+#endif
56366+
56367 if (info->screen_base)
56368 iounmap(info->screen_base);
56369 framebuffer_release(info);
56370diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
56371index 88714ae..16c2e11 100644
56372--- a/drivers/video/via/via_clock.h
56373+++ b/drivers/video/via/via_clock.h
56374@@ -56,7 +56,7 @@ struct via_clock {
56375
56376 void (*set_engine_pll_state)(u8 state);
56377 void (*set_engine_pll)(struct via_pll_config config);
56378-};
56379+} __no_const;
56380
56381
56382 static inline u32 get_pll_internal_frequency(u32 ref_freq,
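struct via_clock is tagged __no_const because its function pointers are filled in at probe time; this exempts it from the constify plugin, which otherwise turns structures consisting only of function pointers into const, read-only objects. The trade-off in miniature (names invented for the sketch):

    #include <stdio.h>

    struct clk_ops { void (*set_pll)(int mhz); };

    static void pll_a(int mhz) { printf("PLL A -> %d MHz\n", mhz); }
    static void pll_b(int mhz) { printf("PLL B -> %d MHz\n", mhz); }

    /* Constifiable: fully known at build time, can live in .rodata. */
    static const struct clk_ops fixed_ops = { .set_pll = pll_a };

    /* The __no_const case: which routine to use is only known at
     * "probe" time, so the table must stay writable. */
    static struct clk_ops runtime_ops;

    int main(int argc, char **argv)
    {
        runtime_ops.set_pll = (argc > 1) ? pll_b : pll_a;
        fixed_ops.set_pll(100);
        runtime_ops.set_pll(200);
        return 0;
    }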
56383diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
56384index fef20db..d28b1ab 100644
56385--- a/drivers/xen/xenfs/xenstored.c
56386+++ b/drivers/xen/xenfs/xenstored.c
56387@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
56388 static int xsd_kva_open(struct inode *inode, struct file *file)
56389 {
56390 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
56391+#ifdef CONFIG_GRKERNSEC_HIDESYM
56392+ NULL);
56393+#else
56394 xen_store_interface);
56395+#endif
56396+
56397 if (!file->private_data)
56398 return -ENOMEM;
56399 return 0;
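With CONFIG_GRKERNSEC_HIDESYM the xenstored hunk formats NULL in place of the xen_store_interface address, so the kernel pointer never reaches userspace through this file (the same leak class that mainline's %pK format later targeted). The pattern in miniature, with an illustrative object:

    /* Compile with -DHIDESYM to get the redacted form.  The hunk
     * above uses "0x%p" because the kernel's %p prints bare hex;
     * userspace %p already carries the prefix. */
    #include <stdio.h>

    static int some_kernel_object;

    int main(void)
    {
        char buf[32];
    #ifdef HIDESYM
        snprintf(buf, sizeof(buf), "%p", (void *)NULL);
    #else
        snprintf(buf, sizeof(buf), "%p", (void *)&some_kernel_object);
    #endif
        printf("exported address string: %s\n", buf);
        return 0;
    }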
56400diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
56401index c71e886..61d3d44b 100644
56402--- a/fs/9p/vfs_addr.c
56403+++ b/fs/9p/vfs_addr.c
56404@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
56405
56406 retval = v9fs_file_write_internal(inode,
56407 v9inode->writeback_fid,
56408- (__force const char __user *)buffer,
56409+ (const char __force_user *)buffer,
56410 len, &offset, 0);
56411 if (retval > 0)
56412 retval = 0;
56413diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
56414index bb7991c..481e21a 100644
56415--- a/fs/9p/vfs_inode.c
56416+++ b/fs/9p/vfs_inode.c
56417@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56418 void
56419 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
56420 {
56421- char *s = nd_get_link(nd);
56422+ const char *s = nd_get_link(nd);
56423
56424 p9_debug(P9_DEBUG_VFS, " %s %s\n",
56425 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
56426diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
56427index 370b24c..ff0be7b 100644
56428--- a/fs/Kconfig.binfmt
56429+++ b/fs/Kconfig.binfmt
56430@@ -103,7 +103,7 @@ config HAVE_AOUT
56431
56432 config BINFMT_AOUT
56433 tristate "Kernel support for a.out and ECOFF binaries"
56434- depends on HAVE_AOUT
56435+ depends on HAVE_AOUT && BROKEN
56436 ---help---
56437 A.out (Assembler.OUTput) is a set of formats for libraries and
56438 executables used in the earliest versions of UNIX. Linux used
56439diff --git a/fs/afs/inode.c b/fs/afs/inode.c
56440index ce25d75..dc09eeb 100644
56441--- a/fs/afs/inode.c
56442+++ b/fs/afs/inode.c
56443@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
56444 struct afs_vnode *vnode;
56445 struct super_block *sb;
56446 struct inode *inode;
56447- static atomic_t afs_autocell_ino;
56448+ static atomic_unchecked_t afs_autocell_ino;
56449
56450 _enter("{%x:%u},%*.*s,",
56451 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
56452@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
56453 data.fid.unique = 0;
56454 data.fid.vnode = 0;
56455
56456- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
56457+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
56458 afs_iget5_autocell_test, afs_iget5_set,
56459 &data);
56460 if (!inode) {
56461diff --git a/fs/aio.c b/fs/aio.c
56462index 04cd768..25949c1 100644
56463--- a/fs/aio.c
56464+++ b/fs/aio.c
56465@@ -375,7 +375,7 @@ static int aio_setup_ring(struct kioctx *ctx)
56466 size += sizeof(struct io_event) * nr_events;
56467
56468 nr_pages = PFN_UP(size);
56469- if (nr_pages < 0)
56470+ if (nr_pages <= 0)
56471 return -EINVAL;
56472
56473 file = aio_private_file(ctx, nr_pages);
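The aio hunk tightens the nr_pages guard from "< 0" to "<= 0": nr_pages is a signed int derived from size_t arithmetic, and a wrapped or truncated size can come out as exactly zero, which the old check let through. A sketch of that failure mode with 32-bit-style arithmetic (the macro mirrors PFN_UP's round-up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    int main(void)
    {
        /* Stand-in for an overflowed size computation: wraps to 0. */
        uint32_t size = UINT32_MAX - 100;
        size += 101;

        int nr_pages = (int)PFN_UP(size);      /* 0 pages */
        if (nr_pages < 0)
            puts("rejected by the old check");
        else if (nr_pages <= 0)
            puts("rejected only by the patched check");
        return 0;
    }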
56474diff --git a/fs/attr.c b/fs/attr.c
56475index 5d4e59d..fd02418 100644
56476--- a/fs/attr.c
56477+++ b/fs/attr.c
56478@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
56479 unsigned long limit;
56480
56481 limit = rlimit(RLIMIT_FSIZE);
56482+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
56483 if (limit != RLIM_INFINITY && offset > limit)
56484 goto out_sig;
56485 if (offset > inode->i_sb->s_maxbytes)
56486diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
56487index 116fd38..c04182da 100644
56488--- a/fs/autofs4/waitq.c
56489+++ b/fs/autofs4/waitq.c
56490@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
56491 {
56492 unsigned long sigpipe, flags;
56493 mm_segment_t fs;
56494- const char *data = (const char *)addr;
56495+ const char __user *data = (const char __force_user *)addr;
56496 ssize_t wr = 0;
56497
56498 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
56499@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
56500 return 1;
56501 }
56502
56503+#ifdef CONFIG_GRKERNSEC_HIDESYM
56504+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
56505+#endif
56506+
56507 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
56508 enum autofs_notify notify)
56509 {
56510@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
56511
56512 /* If this is a direct mount request create a dummy name */
56513 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
56514+#ifdef CONFIG_GRKERNSEC_HIDESYM
56515+ /* this name does get written to userland via autofs4_write() */
56516+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
56517+#else
56518 qstr.len = sprintf(name, "%p", dentry);
56519+#endif
56520 else {
56521 qstr.len = autofs4_getpath(sbi, dentry, &name);
56522 if (!qstr.len) {
56523diff --git a/fs/befs/endian.h b/fs/befs/endian.h
56524index 2722387..56059b5 100644
56525--- a/fs/befs/endian.h
56526+++ b/fs/befs/endian.h
56527@@ -11,7 +11,7 @@
56528
56529 #include <asm/byteorder.h>
56530
56531-static inline u64
56532+static inline u64 __intentional_overflow(-1)
56533 fs64_to_cpu(const struct super_block *sb, fs64 n)
56534 {
56535 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
56536@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
56537 return (__force fs64)cpu_to_be64(n);
56538 }
56539
56540-static inline u32
56541+static inline u32 __intentional_overflow(-1)
56542 fs32_to_cpu(const struct super_block *sb, fs32 n)
56543 {
56544 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
56545@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
56546 return (__force fs32)cpu_to_be32(n);
56547 }
56548
56549-static inline u16
56550+static inline u16 __intentional_overflow(-1)
56551 fs16_to_cpu(const struct super_block *sb, fs16 n)
56552 {
56553 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
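The befs byte-order helpers gain __intentional_overflow(-1). grsecurity's size_overflow GCC plugin instruments integer arithmetic that can flow into size computations and reports wraparound; byte-swapping helpers rearrange bits in ways the plugin would misread as overflow, so they are explicitly exempted. A rough sketch of what the instrumented/exempt split amounts to, with made-up helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Instrumented multiply: report and bail out on wraparound,
     * roughly what the plugin injects around size arithmetic. */
    static size_t checked_mul(size_t a, size_t b)
    {
        size_t r;
        if (__builtin_mul_overflow(a, b, &r)) {
            fprintf(stderr, "size overflow detected\n");
            exit(1);
        }
        return r;
    }

    /* Exempt helper: bit shuffling the plugin must not flag, the
     * role played by __intentional_overflow(-1) above. */
    static uint32_t swab32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
        printf("%08x\n", swab32(0x12345678));       /* 78563412 */
        free(malloc(checked_mul(16, 1024)));        /* fine */
        free(malloc(checked_mul((size_t)-1, 2)));   /* trips the check */
        return 0;
    }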
56554diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
56555index ca0ba15..0fa3257 100644
56556--- a/fs/binfmt_aout.c
56557+++ b/fs/binfmt_aout.c
56558@@ -16,6 +16,7 @@
56559 #include <linux/string.h>
56560 #include <linux/fs.h>
56561 #include <linux/file.h>
56562+#include <linux/security.h>
56563 #include <linux/stat.h>
56564 #include <linux/fcntl.h>
56565 #include <linux/ptrace.h>
56566@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
56567 #endif
56568 # define START_STACK(u) ((void __user *)u.start_stack)
56569
56570+ memset(&dump, 0, sizeof(dump));
56571+
56572 fs = get_fs();
56573 set_fs(KERNEL_DS);
56574 has_dumped = 1;
56575@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
56576
56577 /* If the size of the dump file exceeds the rlimit, then see what would happen
56578 if we wrote the stack, but not the data area. */
56579+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
56580 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
56581 dump.u_dsize = 0;
56582
56583 /* Make sure we have enough room to write the stack and data areas. */
56584+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
56585 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
56586 dump.u_ssize = 0;
56587
56588@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
56589 rlim = rlimit(RLIMIT_DATA);
56590 if (rlim >= RLIM_INFINITY)
56591 rlim = ~0;
56592+
56593+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
56594 if (ex.a_data + ex.a_bss > rlim)
56595 return -ENOMEM;
56596
56597@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
56598
56599 install_exec_creds(bprm);
56600
56601+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56602+ current->mm->pax_flags = 0UL;
56603+#endif
56604+
56605+#ifdef CONFIG_PAX_PAGEEXEC
56606+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
56607+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
56608+
56609+#ifdef CONFIG_PAX_EMUTRAMP
56610+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
56611+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
56612+#endif
56613+
56614+#ifdef CONFIG_PAX_MPROTECT
56615+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
56616+ current->mm->pax_flags |= MF_PAX_MPROTECT;
56617+#endif
56618+
56619+ }
56620+#endif
56621+
56622 if (N_MAGIC(ex) == OMAGIC) {
56623 unsigned long text_addr, map_size;
56624 loff_t pos;
56625@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
56626 }
56627
56628 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
56629- PROT_READ | PROT_WRITE | PROT_EXEC,
56630+ PROT_READ | PROT_WRITE,
56631 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
56632 fd_offset + ex.a_text);
56633 if (error != N_DATADDR(ex)) {
56634diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
56635index 67be295..83e2f86 100644
56636--- a/fs/binfmt_elf.c
56637+++ b/fs/binfmt_elf.c
56638@@ -34,6 +34,7 @@
56639 #include <linux/utsname.h>
56640 #include <linux/coredump.h>
56641 #include <linux/sched.h>
56642+#include <linux/xattr.h>
56643 #include <asm/uaccess.h>
56644 #include <asm/param.h>
56645 #include <asm/page.h>
56646@@ -48,7 +49,7 @@
56647 static int load_elf_binary(struct linux_binprm *bprm);
56648 static int load_elf_library(struct file *);
56649 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
56650- int, int, unsigned long);
56651+ int, int, unsigned long) __intentional_overflow(-1);
56652
56653 /*
56654 * If we don't support core dumping, then supply a NULL so we
56655@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
56656 #define elf_core_dump NULL
56657 #endif
56658
56659+#ifdef CONFIG_PAX_MPROTECT
56660+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
56661+#endif
56662+
56663+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56664+static void elf_handle_mmap(struct file *file);
56665+#endif
56666+
56667 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
56668 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
56669 #else
56670@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
56671 .load_binary = load_elf_binary,
56672 .load_shlib = load_elf_library,
56673 .core_dump = elf_core_dump,
56674+
56675+#ifdef CONFIG_PAX_MPROTECT
56676+ .handle_mprotect= elf_handle_mprotect,
56677+#endif
56678+
56679+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56680+ .handle_mmap = elf_handle_mmap,
56681+#endif
56682+
56683 .min_coredump = ELF_EXEC_PAGESIZE,
56684 };
56685
56686@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
56687
56688 static int set_brk(unsigned long start, unsigned long end)
56689 {
56690+ unsigned long e = end;
56691+
56692 start = ELF_PAGEALIGN(start);
56693 end = ELF_PAGEALIGN(end);
56694 if (end > start) {
56695@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
56696 if (BAD_ADDR(addr))
56697 return addr;
56698 }
56699- current->mm->start_brk = current->mm->brk = end;
56700+ current->mm->start_brk = current->mm->brk = e;
56701 return 0;
56702 }
56703
56704@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56705 elf_addr_t __user *u_rand_bytes;
56706 const char *k_platform = ELF_PLATFORM;
56707 const char *k_base_platform = ELF_BASE_PLATFORM;
56708- unsigned char k_rand_bytes[16];
56709+ u32 k_rand_bytes[4];
56710 int items;
56711 elf_addr_t *elf_info;
56712 int ei_index = 0;
56713 const struct cred *cred = current_cred();
56714 struct vm_area_struct *vma;
56715+ unsigned long saved_auxv[AT_VECTOR_SIZE];
56716
56717 /*
56718 * In some cases (e.g. Hyper-Threading), we want to avoid L1
56719@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56720 * Generate 16 random bytes for userspace PRNG seeding.
56721 */
56722 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
56723- u_rand_bytes = (elf_addr_t __user *)
56724- STACK_ALLOC(p, sizeof(k_rand_bytes));
56725+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
56726+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
56727+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
56728+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
56729+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
56730+ u_rand_bytes = (elf_addr_t __user *) p;
56731 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
56732 return -EFAULT;
56733
56734@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
56735 return -EFAULT;
56736 current->mm->env_end = p;
56737
56738+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
56739+
56740 /* Put the elf_info on the stack in the right place. */
56741 sp = (elf_addr_t __user *)envp + 1;
56742- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
56743+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
56744 return -EFAULT;
56745 return 0;
56746 }
56747@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
56748 an ELF header */
56749
56750 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56751- struct file *interpreter, unsigned long *interp_map_addr,
56752- unsigned long no_base)
56753+ struct file *interpreter, unsigned long no_base)
56754 {
56755 struct elf_phdr *elf_phdata;
56756 struct elf_phdr *eppnt;
56757- unsigned long load_addr = 0;
56758+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
56759 int load_addr_set = 0;
56760 unsigned long last_bss = 0, elf_bss = 0;
56761- unsigned long error = ~0UL;
56762+ unsigned long error = -EINVAL;
56763 unsigned long total_size;
56764 int retval, i, size;
56765
56766@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56767 goto out_close;
56768 }
56769
56770+#ifdef CONFIG_PAX_SEGMEXEC
56771+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
56772+ pax_task_size = SEGMEXEC_TASK_SIZE;
56773+#endif
56774+
56775 eppnt = elf_phdata;
56776 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
56777 if (eppnt->p_type == PT_LOAD) {
56778@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56779 map_addr = elf_map(interpreter, load_addr + vaddr,
56780 eppnt, elf_prot, elf_type, total_size);
56781 total_size = 0;
56782- if (!*interp_map_addr)
56783- *interp_map_addr = map_addr;
56784 error = map_addr;
56785 if (BAD_ADDR(map_addr))
56786 goto out_close;
56787@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56788 k = load_addr + eppnt->p_vaddr;
56789 if (BAD_ADDR(k) ||
56790 eppnt->p_filesz > eppnt->p_memsz ||
56791- eppnt->p_memsz > TASK_SIZE ||
56792- TASK_SIZE - eppnt->p_memsz < k) {
56793+ eppnt->p_memsz > pax_task_size ||
56794+ pax_task_size - eppnt->p_memsz < k) {
56795 error = -ENOMEM;
56796 goto out_close;
56797 }
56798@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56799 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
56800
56801 /* Map the last of the bss segment */
56802- error = vm_brk(elf_bss, last_bss - elf_bss);
56803- if (BAD_ADDR(error))
56804- goto out_close;
56805+ if (last_bss > elf_bss) {
56806+ error = vm_brk(elf_bss, last_bss - elf_bss);
56807+ if (BAD_ADDR(error))
56808+ goto out_close;
56809+ }
56810 }
56811
56812 error = load_addr;
56813@@ -538,6 +569,336 @@ out:
56814 return error;
56815 }
56816
56817+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56818+#ifdef CONFIG_PAX_SOFTMODE
56819+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
56820+{
56821+ unsigned long pax_flags = 0UL;
56822+
56823+#ifdef CONFIG_PAX_PAGEEXEC
56824+ if (elf_phdata->p_flags & PF_PAGEEXEC)
56825+ pax_flags |= MF_PAX_PAGEEXEC;
56826+#endif
56827+
56828+#ifdef CONFIG_PAX_SEGMEXEC
56829+ if (elf_phdata->p_flags & PF_SEGMEXEC)
56830+ pax_flags |= MF_PAX_SEGMEXEC;
56831+#endif
56832+
56833+#ifdef CONFIG_PAX_EMUTRAMP
56834+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56835+ pax_flags |= MF_PAX_EMUTRAMP;
56836+#endif
56837+
56838+#ifdef CONFIG_PAX_MPROTECT
56839+ if (elf_phdata->p_flags & PF_MPROTECT)
56840+ pax_flags |= MF_PAX_MPROTECT;
56841+#endif
56842+
56843+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56844+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
56845+ pax_flags |= MF_PAX_RANDMMAP;
56846+#endif
56847+
56848+ return pax_flags;
56849+}
56850+#endif
56851+
56852+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
56853+{
56854+ unsigned long pax_flags = 0UL;
56855+
56856+#ifdef CONFIG_PAX_PAGEEXEC
56857+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
56858+ pax_flags |= MF_PAX_PAGEEXEC;
56859+#endif
56860+
56861+#ifdef CONFIG_PAX_SEGMEXEC
56862+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
56863+ pax_flags |= MF_PAX_SEGMEXEC;
56864+#endif
56865+
56866+#ifdef CONFIG_PAX_EMUTRAMP
56867+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
56868+ pax_flags |= MF_PAX_EMUTRAMP;
56869+#endif
56870+
56871+#ifdef CONFIG_PAX_MPROTECT
56872+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
56873+ pax_flags |= MF_PAX_MPROTECT;
56874+#endif
56875+
56876+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56877+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
56878+ pax_flags |= MF_PAX_RANDMMAP;
56879+#endif
56880+
56881+ return pax_flags;
56882+}
56883+#endif
56884+
56885+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56886+#ifdef CONFIG_PAX_SOFTMODE
56887+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
56888+{
56889+ unsigned long pax_flags = 0UL;
56890+
56891+#ifdef CONFIG_PAX_PAGEEXEC
56892+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
56893+ pax_flags |= MF_PAX_PAGEEXEC;
56894+#endif
56895+
56896+#ifdef CONFIG_PAX_SEGMEXEC
56897+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
56898+ pax_flags |= MF_PAX_SEGMEXEC;
56899+#endif
56900+
56901+#ifdef CONFIG_PAX_EMUTRAMP
56902+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56903+ pax_flags |= MF_PAX_EMUTRAMP;
56904+#endif
56905+
56906+#ifdef CONFIG_PAX_MPROTECT
56907+ if (pax_flags_softmode & MF_PAX_MPROTECT)
56908+ pax_flags |= MF_PAX_MPROTECT;
56909+#endif
56910+
56911+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56912+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
56913+ pax_flags |= MF_PAX_RANDMMAP;
56914+#endif
56915+
56916+ return pax_flags;
56917+}
56918+#endif
56919+
56920+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
56921+{
56922+ unsigned long pax_flags = 0UL;
56923+
56924+#ifdef CONFIG_PAX_PAGEEXEC
56925+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
56926+ pax_flags |= MF_PAX_PAGEEXEC;
56927+#endif
56928+
56929+#ifdef CONFIG_PAX_SEGMEXEC
56930+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
56931+ pax_flags |= MF_PAX_SEGMEXEC;
56932+#endif
56933+
56934+#ifdef CONFIG_PAX_EMUTRAMP
56935+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
56936+ pax_flags |= MF_PAX_EMUTRAMP;
56937+#endif
56938+
56939+#ifdef CONFIG_PAX_MPROTECT
56940+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
56941+ pax_flags |= MF_PAX_MPROTECT;
56942+#endif
56943+
56944+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56945+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
56946+ pax_flags |= MF_PAX_RANDMMAP;
56947+#endif
56948+
56949+ return pax_flags;
56950+}
56951+#endif
56952+
56953+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56954+static unsigned long pax_parse_defaults(void)
56955+{
56956+ unsigned long pax_flags = 0UL;
56957+
56958+#ifdef CONFIG_PAX_SOFTMODE
56959+ if (pax_softmode)
56960+ return pax_flags;
56961+#endif
56962+
56963+#ifdef CONFIG_PAX_PAGEEXEC
56964+ pax_flags |= MF_PAX_PAGEEXEC;
56965+#endif
56966+
56967+#ifdef CONFIG_PAX_SEGMEXEC
56968+ pax_flags |= MF_PAX_SEGMEXEC;
56969+#endif
56970+
56971+#ifdef CONFIG_PAX_MPROTECT
56972+ pax_flags |= MF_PAX_MPROTECT;
56973+#endif
56974+
56975+#ifdef CONFIG_PAX_RANDMMAP
56976+ if (randomize_va_space)
56977+ pax_flags |= MF_PAX_RANDMMAP;
56978+#endif
56979+
56980+ return pax_flags;
56981+}
56982+
56983+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
56984+{
56985+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
56986+
56987+#ifdef CONFIG_PAX_EI_PAX
56988+
56989+#ifdef CONFIG_PAX_SOFTMODE
56990+ if (pax_softmode)
56991+ return pax_flags;
56992+#endif
56993+
56994+ pax_flags = 0UL;
56995+
56996+#ifdef CONFIG_PAX_PAGEEXEC
56997+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
56998+ pax_flags |= MF_PAX_PAGEEXEC;
56999+#endif
57000+
57001+#ifdef CONFIG_PAX_SEGMEXEC
57002+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
57003+ pax_flags |= MF_PAX_SEGMEXEC;
57004+#endif
57005+
57006+#ifdef CONFIG_PAX_EMUTRAMP
57007+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
57008+ pax_flags |= MF_PAX_EMUTRAMP;
57009+#endif
57010+
57011+#ifdef CONFIG_PAX_MPROTECT
57012+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
57013+ pax_flags |= MF_PAX_MPROTECT;
57014+#endif
57015+
57016+#ifdef CONFIG_PAX_ASLR
57017+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
57018+ pax_flags |= MF_PAX_RANDMMAP;
57019+#endif
57020+
57021+#endif
57022+
57023+ return pax_flags;
57024+
57025+}
57026+
57027+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
57028+{
57029+
57030+#ifdef CONFIG_PAX_PT_PAX_FLAGS
57031+ unsigned long i;
57032+
57033+ for (i = 0UL; i < elf_ex->e_phnum; i++)
57034+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
57035+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
57036+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
57037+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
57038+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
57039+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
57040+ return PAX_PARSE_FLAGS_FALLBACK;
57041+
57042+#ifdef CONFIG_PAX_SOFTMODE
57043+ if (pax_softmode)
57044+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
57045+ else
57046+#endif
57047+
57048+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
57049+ break;
57050+ }
57051+#endif
57052+
57053+ return PAX_PARSE_FLAGS_FALLBACK;
57054+}
57055+
57056+static unsigned long pax_parse_xattr_pax(struct file * const file)
57057+{
57058+
57059+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
57060+ ssize_t xattr_size, i;
57061+ unsigned char xattr_value[sizeof("pemrs") - 1];
57062+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
57063+
57064+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
57065+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
57066+ return PAX_PARSE_FLAGS_FALLBACK;
57067+
57068+ for (i = 0; i < xattr_size; i++)
57069+ switch (xattr_value[i]) {
57070+ default:
57071+ return PAX_PARSE_FLAGS_FALLBACK;
57072+
57073+#define parse_flag(option1, option2, flag) \
57074+ case option1: \
57075+ if (pax_flags_hardmode & MF_PAX_##flag) \
57076+ return PAX_PARSE_FLAGS_FALLBACK;\
57077+ pax_flags_hardmode |= MF_PAX_##flag; \
57078+ break; \
57079+ case option2: \
57080+ if (pax_flags_softmode & MF_PAX_##flag) \
57081+ return PAX_PARSE_FLAGS_FALLBACK;\
57082+ pax_flags_softmode |= MF_PAX_##flag; \
57083+ break;
57084+
57085+ parse_flag('p', 'P', PAGEEXEC);
57086+ parse_flag('e', 'E', EMUTRAMP);
57087+ parse_flag('m', 'M', MPROTECT);
57088+ parse_flag('r', 'R', RANDMMAP);
57089+ parse_flag('s', 'S', SEGMEXEC);
57090+
57091+#undef parse_flag
57092+ }
57093+
57094+ if (pax_flags_hardmode & pax_flags_softmode)
57095+ return PAX_PARSE_FLAGS_FALLBACK;
57096+
57097+#ifdef CONFIG_PAX_SOFTMODE
57098+ if (pax_softmode)
57099+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
57100+ else
57101+#endif
57102+
57103+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
57104+#else
57105+ return PAX_PARSE_FLAGS_FALLBACK;
57106+#endif
57107+
57108+}
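
The xattr parser reads at most five characters, one per PaX feature, with lowercase selecting hardmode and uppercase softmode; any unknown letter, a repeated letter, or a feature named in both modes triggers the fallback. A stand-alone sketch of that decoding; the F_* bits are illustrative, not the kernel's MF_PAX_* values:

    /* Flag bits for illustration only; the kernel uses its MF_PAX_* masks. */
    enum { F_PAGEEXEC = 1, F_EMUTRAMP = 2, F_MPROTECT = 4,
           F_RANDMMAP = 8, F_SEGMEXEC = 16 };

    /* Decode a "pemrs"-style string: lowercase requests hardmode,
     * uppercase softmode. Returns -1 on any input the kernel parser
     * would also treat as grounds to fall back. */
    static int decode_pax_xattr(const char *s, unsigned *hard, unsigned *soft)
    {
        *hard = 0;
        *soft = 0;
        for (; *s; s++) {
            unsigned bit;
            switch (*s) {
            case 'p': case 'P': bit = F_PAGEEXEC; break;
            case 'e': case 'E': bit = F_EMUTRAMP; break;
            case 'm': case 'M': bit = F_MPROTECT; break;
            case 'r': case 'R': bit = F_RANDMMAP; break;
            case 's': case 'S': bit = F_SEGMEXEC; break;
            default: return -1;             /* unknown letter */
            }
            unsigned *mask = (*s >= 'a') ? hard : soft;
            if (*mask & bit)
                return -1;                  /* same letter seen twice */
            *mask |= bit;
        }
        return (*hard & *soft) ? -1 : 0;    /* feature in both modes */
    }

The string itself lives in an extended attribute on the binary (PaX tooling conventionally uses the user.pax.flags name), so, assuming that attribute name, something like setfattr -n user.pax.flags -v m ./prog would request hardmode MPROTECT for that file.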
57109+
57110+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
57111+{
57112+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
57113+
57114+ pax_flags = pax_parse_defaults();
57115+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
57116+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
57117+ xattr_pax_flags = pax_parse_xattr_pax(file);
57118+
57119+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
57120+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
57121+ pt_pax_flags != xattr_pax_flags)
57122+ return -EINVAL;
57123+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
57124+ pax_flags = xattr_pax_flags;
57125+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
57126+ pax_flags = pt_pax_flags;
57127+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
57128+ pax_flags = ei_pax_flags;
57129+
57130+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
57131+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
57132+ if ((__supported_pte_mask & _PAGE_NX))
57133+ pax_flags &= ~MF_PAX_SEGMEXEC;
57134+ else
57135+ pax_flags &= ~MF_PAX_PAGEEXEC;
57136+ }
57137+#endif
57138+
57139+ if (0 > pax_check_flags(&pax_flags))
57140+ return -EINVAL;
57141+
57142+ current->mm->pax_flags = pax_flags;
57143+ return 0;
57144+}
57145+#endif
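
pax_parse_pax_flags() above combines four sources with a fixed precedence: the per-file xattr wins over PT_PAX_FLAGS, which wins over the legacy EI_PAX bits, which win over the compile-time defaults; two explicit sources that disagree abort the exec with -EINVAL. A condensed sketch of just that resolution step, with FALLBACK standing in for PAX_PARSE_FLAGS_FALLBACK:

    #define FALLBACK (~0UL)   /* stand-in for PAX_PARSE_FLAGS_FALLBACK */

    static long resolve_pax_flags(unsigned long defaults, unsigned long ei,
                                  unsigned long pt, unsigned long xattr,
                                  unsigned long *out)
    {
        /* two explicit marking methods that disagree are a hard error */
        if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
            return -1;

        if (xattr != FALLBACK)
            *out = xattr;     /* strongest: per-file extended attribute */
        else if (pt != FALLBACK)
            *out = pt;        /* PT_PAX_FLAGS program header */
        else if (ei != FALLBACK)
            *out = ei;        /* legacy EI_PAX bits in e_ident */
        else
            *out = defaults;  /* compile-time kernel policy */
        return 0;
    }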
57146+
57147 /*
57148 * These are the functions used to load ELF style executables and shared
57149 * libraries. There is no binary dependent code anywhere else.
57150@@ -551,6 +912,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
57151 {
57152 unsigned int random_variable = 0;
57153
57154+#ifdef CONFIG_PAX_RANDUSTACK
57155+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
57156+ return stack_top - current->mm->delta_stack;
57157+#endif
57158+
57159 if ((current->flags & PF_RANDOMIZE) &&
57160 !(current->personality & ADDR_NO_RANDOMIZE)) {
57161 random_variable = get_random_int() & STACK_RND_MASK;
57162@@ -569,7 +935,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
57163 unsigned long load_addr = 0, load_bias = 0;
57164 int load_addr_set = 0;
57165 char * elf_interpreter = NULL;
57166- unsigned long error;
57167+ unsigned long error = 0;
57168 struct elf_phdr *elf_ppnt, *elf_phdata;
57169 unsigned long elf_bss, elf_brk;
57170 int retval, i;
57171@@ -579,12 +945,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
57172 unsigned long start_code, end_code, start_data, end_data;
57173 unsigned long reloc_func_desc __maybe_unused = 0;
57174 int executable_stack = EXSTACK_DEFAULT;
57175- unsigned long def_flags = 0;
57176 struct pt_regs *regs = current_pt_regs();
57177 struct {
57178 struct elfhdr elf_ex;
57179 struct elfhdr interp_elf_ex;
57180 } *loc;
57181+ unsigned long pax_task_size;
57182
57183 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
57184 if (!loc) {
57185@@ -720,11 +1086,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
57186 goto out_free_dentry;
57187
57188 /* OK, This is the point of no return */
57189- current->mm->def_flags = def_flags;
57190+ current->mm->def_flags = 0;
57191
57192 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
57193 may depend on the personality. */
57194 SET_PERSONALITY(loc->elf_ex);
57195+
57196+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57197+ current->mm->pax_flags = 0UL;
57198+#endif
57199+
57200+#ifdef CONFIG_PAX_DLRESOLVE
57201+ current->mm->call_dl_resolve = 0UL;
57202+#endif
57203+
57204+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57205+ current->mm->call_syscall = 0UL;
57206+#endif
57207+
57208+#ifdef CONFIG_PAX_ASLR
57209+ current->mm->delta_mmap = 0UL;
57210+ current->mm->delta_stack = 0UL;
57211+#endif
57212+
57213+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57214+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
57215+ send_sig(SIGKILL, current, 0);
57216+ goto out_free_dentry;
57217+ }
57218+#endif
57219+
57220+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57221+ pax_set_initial_flags(bprm);
57222+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57223+ if (pax_set_initial_flags_func)
57224+ (pax_set_initial_flags_func)(bprm);
57225+#endif
57226+
57227+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57228+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
57229+ current->mm->context.user_cs_limit = PAGE_SIZE;
57230+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
57231+ }
57232+#endif
57233+
57234+#ifdef CONFIG_PAX_SEGMEXEC
57235+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
57236+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
57237+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
57238+ pax_task_size = SEGMEXEC_TASK_SIZE;
57239+ current->mm->def_flags |= VM_NOHUGEPAGE;
57240+ } else
57241+#endif
57242+
57243+ pax_task_size = TASK_SIZE;
57244+
57245+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
57246+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
57247+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
57248+ put_cpu();
57249+ }
57250+#endif
57251+
57252+#ifdef CONFIG_PAX_ASLR
57253+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
57254+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
57255+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
57256+ }
57257+#endif
57258+
57259+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
57260+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
57261+ executable_stack = EXSTACK_DISABLE_X;
57262+ current->personality &= ~READ_IMPLIES_EXEC;
57263+ } else
57264+#endif
57265+
57266 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
57267 current->personality |= READ_IMPLIES_EXEC;
57268
57269@@ -814,6 +1251,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
57270 #else
57271 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
57272 #endif
57273+
57274+#ifdef CONFIG_PAX_RANDMMAP
57275+ /* PaX: randomize base address at the default exe base if requested */
57276+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
57277+#ifdef CONFIG_SPARC64
57278+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
57279+#else
57280+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
57281+#endif
57282+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
57283+ elf_flags |= MAP_FIXED;
57284+ }
57285+#endif
57286+
57287 }
57288
57289 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
57290@@ -846,9 +1297,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
57291 * allowed task size. Note that p_filesz must always be
57292 * <= p_memsz so it is only necessary to check p_memsz.
57293 */
57294- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
57295- elf_ppnt->p_memsz > TASK_SIZE ||
57296- TASK_SIZE - elf_ppnt->p_memsz < k) {
57297+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
57298+ elf_ppnt->p_memsz > pax_task_size ||
57299+ pax_task_size - elf_ppnt->p_memsz < k) {
57300 /* set_brk can never work. Avoid overflows. */
57301 send_sig(SIGKILL, current, 0);
57302 retval = -EINVAL;
57303@@ -887,17 +1338,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
57304 goto out_free_dentry;
57305 }
57306 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
57307- send_sig(SIGSEGV, current, 0);
57308- retval = -EFAULT; /* Nobody gets to see this, but.. */
57309- goto out_free_dentry;
57310+ /*
57311+ * This bss-zeroing can fail if the ELF
57312+ * file specifies odd protections. So
57313+ * we don't check the return value.
57313+ * we don't check the return value.
57314+ */
57315 }
57316
57317+#ifdef CONFIG_PAX_RANDMMAP
57318+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
57319+ unsigned long start, size, flags;
57320+ vm_flags_t vm_flags;
57321+
57322+ start = ELF_PAGEALIGN(elf_brk);
57323+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
57324+ flags = MAP_FIXED | MAP_PRIVATE;
57325+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
57326+
57327+ down_write(&current->mm->mmap_sem);
57328+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
57329+ retval = -ENOMEM;
57330+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
57331+// if (current->personality & ADDR_NO_RANDOMIZE)
57332+// vm_flags |= VM_READ | VM_MAYREAD;
57333+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
57334+ retval = IS_ERR_VALUE(start) ? start : 0;
57335+ }
57336+ up_write(&current->mm->mmap_sem);
57337+ if (retval == 0)
57338+ retval = set_brk(start + size, start + size + PAGE_SIZE);
57339+ if (retval < 0) {
57340+ send_sig(SIGKILL, current, 0);
57341+ goto out_free_dentry;
57342+ }
57343+ }
57344+#endif
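
The block above reserves a randomly sized, non-expandable dead zone just past the brk area so the heap cannot silently grow into later mmap regions. With the constants shown (22 random bits, shifted left by 4) the gap ranges from one page to roughly 64 MiB; a tiny arithmetic check, assuming 4 KiB pages:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;              /* assumed 4 KiB pages */
        unsigned long max_rand  = (1UL << 22) - 1UL; /* 22 bits of entropy */
        unsigned long max_gap   = page_size + (max_rand << 4);

        /* about 64 MiB: the largest dead zone placed above the brk area */
        printf("max brk gap: %lu bytes (~%lu MiB)\n", max_gap, max_gap >> 20);
        return 0;
    }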
57345+
57346 if (elf_interpreter) {
57347- unsigned long interp_map_addr = 0;
57348-
57349 elf_entry = load_elf_interp(&loc->interp_elf_ex,
57350 interpreter,
57351- &interp_map_addr,
57352 load_bias);
57353 if (!IS_ERR((void *)elf_entry)) {
57354 /*
57355@@ -1119,7 +1598,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
57356 * Decide what to dump of a segment, part, all or none.
57357 */
57358 static unsigned long vma_dump_size(struct vm_area_struct *vma,
57359- unsigned long mm_flags)
57360+ unsigned long mm_flags, long signr)
57361 {
57362 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
57363
57364@@ -1157,7 +1636,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
57365 if (vma->vm_file == NULL)
57366 return 0;
57367
57368- if (FILTER(MAPPED_PRIVATE))
57369+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
57370 goto whole;
57371
57372 /*
57373@@ -1364,9 +1843,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
57374 {
57375 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
57376 int i = 0;
57377- do
57378+ do {
57379 i += 2;
57380- while (auxv[i - 2] != AT_NULL);
57381+ } while (auxv[i - 2] != AT_NULL);
57382 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
57383 }
57384
57385@@ -1375,7 +1854,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
57386 {
57387 mm_segment_t old_fs = get_fs();
57388 set_fs(KERNEL_DS);
57389- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
57390+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
57391 set_fs(old_fs);
57392 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
57393 }
57394@@ -1999,14 +2478,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
57395 }
57396
57397 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
57398- unsigned long mm_flags)
57399+ struct coredump_params *cprm)
57400 {
57401 struct vm_area_struct *vma;
57402 size_t size = 0;
57403
57404 for (vma = first_vma(current, gate_vma); vma != NULL;
57405 vma = next_vma(vma, gate_vma))
57406- size += vma_dump_size(vma, mm_flags);
57407+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
57408 return size;
57409 }
57410
57411@@ -2097,7 +2576,7 @@ static int elf_core_dump(struct coredump_params *cprm)
57412
57413 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
57414
57415- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
57416+ offset += elf_core_vma_data_size(gate_vma, cprm);
57417 offset += elf_core_extra_data_size();
57418 e_shoff = offset;
57419
57420@@ -2125,7 +2604,7 @@ static int elf_core_dump(struct coredump_params *cprm)
57421 phdr.p_offset = offset;
57422 phdr.p_vaddr = vma->vm_start;
57423 phdr.p_paddr = 0;
57424- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
57425+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
57426 phdr.p_memsz = vma->vm_end - vma->vm_start;
57427 offset += phdr.p_filesz;
57428 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
57429@@ -2158,7 +2637,7 @@ static int elf_core_dump(struct coredump_params *cprm)
57430 unsigned long addr;
57431 unsigned long end;
57432
57433- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
57434+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
57435
57436 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
57437 struct page *page;
57438@@ -2199,6 +2678,167 @@ out:
57439
57440 #endif /* CONFIG_ELF_CORE */
57441
57442+#ifdef CONFIG_PAX_MPROTECT
57443+/* PaX: non-PIC ELF libraries need relocations on their executable segments;
57444+ * therefore we'll grant them VM_MAYWRITE once during their lifetime. Similarly,
57445+ * we'll remove VM_MAYWRITE for good on RELRO segments.
57446+ *
57447+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
57448+ * basis, because we want to allow the common case and not the special ones.
57449+ */
57450+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
57451+{
57452+ struct elfhdr elf_h;
57453+ struct elf_phdr elf_p;
57454+ unsigned long i;
57455+ unsigned long oldflags;
57456+ bool is_textrel_rw, is_textrel_rx, is_relro;
57457+
57458+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
57459+ return;
57460+
57461+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
57462+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
57463+
57464+#ifdef CONFIG_PAX_ELFRELOCS
57465+ /* possible TEXTREL */
57466+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
57467+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
57468+#else
57469+ is_textrel_rw = false;
57470+ is_textrel_rx = false;
57471+#endif
57472+
57473+ /* possible RELRO */
57474+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
57475+
57476+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
57477+ return;
57478+
57479+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
57480+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
57481+
57482+#ifdef CONFIG_PAX_ETEXECRELOCS
57483+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
57484+#else
57485+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
57486+#endif
57487+
57488+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
57489+ !elf_check_arch(&elf_h) ||
57490+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
57491+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
57492+ return;
57493+
57494+ for (i = 0UL; i < elf_h.e_phnum; i++) {
57495+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
57496+ return;
57497+ switch (elf_p.p_type) {
57498+ case PT_DYNAMIC:
57499+ if (!is_textrel_rw && !is_textrel_rx)
57500+ continue;
57501+ i = 0UL;
57502+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
57503+ elf_dyn dyn;
57504+
57505+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
57506+ break;
57507+ if (dyn.d_tag == DT_NULL)
57508+ break;
57509+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
57510+ gr_log_textrel(vma);
57511+ if (is_textrel_rw)
57512+ vma->vm_flags |= VM_MAYWRITE;
57513+ else
57514+ /* PaX: disallow write access after relocs are done; hopefully no one else needs it... */
57515+ vma->vm_flags &= ~VM_MAYWRITE;
57516+ break;
57517+ }
57518+ i++;
57519+ }
57520+ is_textrel_rw = false;
57521+ is_textrel_rx = false;
57522+ continue;
57523+
57524+ case PT_GNU_RELRO:
57525+ if (!is_relro)
57526+ continue;
57527+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
57528+ vma->vm_flags &= ~VM_MAYWRITE;
57529+ is_relro = false;
57530+ continue;
57531+
57532+#ifdef CONFIG_PAX_PT_PAX_FLAGS
57533+ case PT_PAX_FLAGS: {
57534+ const char *msg_mprotect = "", *msg_emutramp = "";
57535+ char *buffer_lib, *buffer_exe;
57536+
57537+ if (elf_p.p_flags & PF_NOMPROTECT)
57538+ msg_mprotect = "MPROTECT disabled";
57539+
57540+#ifdef CONFIG_PAX_EMUTRAMP
57541+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
57542+ msg_emutramp = "EMUTRAMP enabled";
57543+#endif
57544+
57545+ if (!msg_mprotect[0] && !msg_emutramp[0])
57546+ continue;
57547+
57548+ if (!printk_ratelimit())
57549+ continue;
57550+
57551+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
57552+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
57553+ if (buffer_lib && buffer_exe) {
57554+ char *path_lib, *path_exe;
57555+
57556+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
57557+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
57558+
57559+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
57560+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
57561+
57562+ }
57563+ free_page((unsigned long)buffer_exe);
57564+ free_page((unsigned long)buffer_lib);
57565+ continue;
57566+ }
57567+#endif
57568+
57569+ }
57570+ }
57571+}
57572+#endif
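
elf_handle_mprotect() confirms a suspected text relocation by re-reading the on-disk PT_DYNAMIC segment and looking for DT_TEXTREL, or DF_TEXTREL inside DT_FLAGS, before it grants or revokes VM_MAYWRITE. The same two-tag test, sketched over an already-parsed dynamic section in user space:

    #include <elf.h>
    #include <stdbool.h>

    /* Report whether a PT_DYNAMIC segment declares text relocations, via
     * the legacy DT_TEXTREL tag or DF_TEXTREL inside DT_FLAGS; these are
     * the two conditions the kernel-side loop tests with kernel_read(). */
    static bool has_textrel(const Elf64_Dyn *dyn)
    {
        for (; dyn->d_tag != DT_NULL; dyn++) {
            if (dyn->d_tag == DT_TEXTREL)
                return true;
            if (dyn->d_tag == DT_FLAGS && (dyn->d_un.d_val & DF_TEXTREL))
                return true;
        }
        return false;
    }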
57573+
57574+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57575+
57576+extern int grsec_enable_log_rwxmaps;
57577+
57578+static void elf_handle_mmap(struct file *file)
57579+{
57580+ struct elfhdr elf_h;
57581+ struct elf_phdr elf_p;
57582+ unsigned long i;
57583+
57584+ if (!grsec_enable_log_rwxmaps)
57585+ return;
57586+
57587+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
57588+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
57589+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
57590+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
57591+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
57592+ return;
57593+
57594+ for (i = 0UL; i < elf_h.e_phnum; i++) {
57595+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
57596+ return;
57597+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
57598+ gr_log_ptgnustack(file);
57599+ }
57600+}
57601+#endif
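
elf_handle_mmap() only has to spot binaries whose PT_GNU_STACK header asks for an executable stack. An equivalent user-space predicate over a parsed program-header table:

    #include <elf.h>
    #include <stdbool.h>

    /* True when a PT_GNU_STACK program header requests an executable
     * stack, the condition the hook logs via gr_log_ptgnustack(). */
    static bool wants_exec_stack(const Elf64_Phdr *phdr, unsigned int phnum)
    {
        for (unsigned int i = 0; i < phnum; i++)
            if (phdr[i].p_type == PT_GNU_STACK && (phdr[i].p_flags & PF_X))
                return true;
        return false;
    }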
57602+
57603 static int __init init_elf_binfmt(void)
57604 {
57605 register_binfmt(&elf_format);
57606diff --git a/fs/bio.c b/fs/bio.c
57607index 8754e7b..0669094 100644
57608--- a/fs/bio.c
57609+++ b/fs/bio.c
57610@@ -1145,7 +1145,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
57611 /*
57612 * Overflow, abort
57613 */
57614- if (end < start)
57615+ if (end < start || end - start > INT_MAX - nr_pages)
57616 return ERR_PTR(-EINVAL);
57617
57618 nr_pages += end - start;
57619@@ -1279,7 +1279,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
57620 /*
57621 * Overflow, abort
57622 */
57623- if (end < start)
57624+ if (end < start || end - start > INT_MAX - nr_pages)
57625 return ERR_PTR(-EINVAL);
57626
57627 nr_pages += end - start;
57628@@ -1541,7 +1541,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
57629 const int read = bio_data_dir(bio) == READ;
57630 struct bio_map_data *bmd = bio->bi_private;
57631 int i;
57632- char *p = bmd->sgvecs[0].iov_base;
57633+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
57634
57635 bio_for_each_segment_all(bvec, bio, i) {
57636 char *addr = page_address(bvec->bv_page);
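
Both fs/bio.c hunks harden the same accumulation: end < start still catches wraparound produced by the page rounding, and the added end - start > INT_MAX - nr_pages clause rejects totals that would overflow the int page counter. The general pattern, checking headroom before adding:

    #include <limits.h>
    #include <stdbool.h>

    /* Accumulate a page count, refusing any step that would overflow the
     * int accumulator: test the remaining headroom before adding. */
    static bool add_pages(int *nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start)                      /* wrapped during rounding */
            return false;
        if (end - start > (unsigned long)(INT_MAX - *nr_pages))
            return false;                     /* sum would exceed INT_MAX */
        *nr_pages += (int)(end - start);
        return true;
    }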
57637diff --git a/fs/block_dev.c b/fs/block_dev.c
57638index 1e86823..8e34695 100644
57639--- a/fs/block_dev.c
57640+++ b/fs/block_dev.c
57641@@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
57642 else if (bdev->bd_contains == bdev)
57643 return true; /* is a whole device which isn't held */
57644
57645- else if (whole->bd_holder == bd_may_claim)
57646+ else if (whole->bd_holder == (void *)bd_may_claim)
57647 return true; /* is a partition of a device that is being partitioned */
57648 else if (whole->bd_holder != NULL)
57649 return false; /* is a partition of a held device */
57650diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
57651index cbd3a7d6f..c6a2881 100644
57652--- a/fs/btrfs/ctree.c
57653+++ b/fs/btrfs/ctree.c
57654@@ -1216,9 +1216,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
57655 free_extent_buffer(buf);
57656 add_root_to_dirty_list(root);
57657 } else {
57658- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
57659- parent_start = parent->start;
57660- else
57661+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
57662+ if (parent)
57663+ parent_start = parent->start;
57664+ else
57665+ parent_start = 0;
57666+ } else
57667 parent_start = 0;
57668
57669 WARN_ON(trans->transid != btrfs_header_generation(parent));
57670diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
57671index 451b00c..a2cccee 100644
57672--- a/fs/btrfs/delayed-inode.c
57673+++ b/fs/btrfs/delayed-inode.c
57674@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
57675
57676 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
57677 {
57678- int seq = atomic_inc_return(&delayed_root->items_seq);
57679+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
57680 if ((atomic_dec_return(&delayed_root->items) <
57681 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
57682 waitqueue_active(&delayed_root->wait))
57683@@ -1409,7 +1409,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
57684
57685 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
57686 {
57687- int val = atomic_read(&delayed_root->items_seq);
57688+ int val = atomic_read_unchecked(&delayed_root->items_seq);
57689
57690 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
57691 return 1;
57692@@ -1433,7 +1433,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
57693 int seq;
57694 int ret;
57695
57696- seq = atomic_read(&delayed_root->items_seq);
57697+ seq = atomic_read_unchecked(&delayed_root->items_seq);
57698
57699 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
57700 if (ret)
57701diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
57702index f70119f..ab5894d 100644
57703--- a/fs/btrfs/delayed-inode.h
57704+++ b/fs/btrfs/delayed-inode.h
57705@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
57706 */
57707 struct list_head prepare_list;
57708 atomic_t items; /* for delayed items */
57709- atomic_t items_seq; /* for delayed items */
57710+ atomic_unchecked_t items_seq; /* for delayed items */
57711 int nodes; /* for delayed nodes */
57712 wait_queue_head_t wait;
57713 };
57714@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
57715 struct btrfs_delayed_root *delayed_root)
57716 {
57717 atomic_set(&delayed_root->items, 0);
57718- atomic_set(&delayed_root->items_seq, 0);
57719+ atomic_set_unchecked(&delayed_root->items_seq, 0);
57720 delayed_root->nodes = 0;
57721 spin_lock_init(&delayed_root->lock);
57722 init_waitqueue_head(&delayed_root->wait);
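
items_seq is a wrapping sequence counter, so converting it to atomic_unchecked_t exempts it from the PAX_REFCOUNT overflow protection that plain atomic_t arithmetic gets under this patch. A sketch of the two disciplines being distinguished; saturation below stands in for the kernel's actual trap-and-log response:

    #include <stdint.h>

    /* A reference count must never wrap (a wrap typically leads to a
     * later use-after-free); saturating here is only a stand-in for the
     * kernel's behaviour of trapping and logging the overflow. */
    static uint32_t refcount_inc_checked(uint32_t v)
    {
        return (v == UINT32_MAX) ? v : v + 1;
    }

    /* A statistics or sequence counter may wrap harmlessly, which is
     * exactly what the _unchecked conversions in this patch annotate. */
    static uint32_t seq_inc_unchecked(uint32_t v)
    {
        return v + 1U;
    }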
57723diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
57724index a6d8efa..2f062cf 100644
57725--- a/fs/btrfs/ioctl.c
57726+++ b/fs/btrfs/ioctl.c
57727@@ -3491,9 +3491,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
57728 for (i = 0; i < num_types; i++) {
57729 struct btrfs_space_info *tmp;
57730
57731+ /* Don't copy in more than we allocated */
57732 if (!slot_count)
57733 break;
57734
57735+ slot_count--;
57736+
57737 info = NULL;
57738 rcu_read_lock();
57739 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
57740@@ -3515,10 +3518,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
57741 memcpy(dest, &space, sizeof(space));
57742 dest++;
57743 space_args.total_spaces++;
57744- slot_count--;
57745 }
57746- if (!slot_count)
57747- break;
57748 }
57749 up_read(&info->groups_sem);
57750 }
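
The reordered slot_count bookkeeping charges the output budget once per loop iteration, before any entry is produced, so the user-supplied buffer can no longer receive more btrfs_ioctl_space_info records than were allocated. The shape of that fix in isolation:

    #include <stddef.h>

    /* Spend the output budget before producing an entry, so no code path
     * can emit more items than the caller allocated. */
    static size_t copy_bounded(int *dst, const int *src, size_t n, size_t budget)
    {
        size_t copied = 0;

        for (size_t i = 0; i < n; i++) {
            if (!budget)
                break;
            budget--;          /* charge up front, as the hunk above does */
            dst[copied++] = src[i];
        }
        return copied;
    }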
57751diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
57752index d04db81..96e54f1 100644
57753--- a/fs/btrfs/super.c
57754+++ b/fs/btrfs/super.c
57755@@ -268,7 +268,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
57756 function, line, errstr);
57757 return;
57758 }
57759- ACCESS_ONCE(trans->transaction->aborted) = errno;
57760+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
57761 /* Wake up anybody who may be waiting on this transaction */
57762 wake_up(&root->fs_info->transaction_wait);
57763 wake_up(&root->fs_info->transaction_blocked_wait);
57764diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
57765index 865f4cf..f321e86 100644
57766--- a/fs/btrfs/sysfs.c
57767+++ b/fs/btrfs/sysfs.c
57768@@ -436,7 +436,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
57769 for (set = 0; set < FEAT_MAX; set++) {
57770 int i;
57771 struct attribute *attrs[2];
57772- struct attribute_group agroup = {
57773+ attribute_group_no_const agroup = {
57774 .name = "features",
57775 .attrs = attrs,
57776 };
57777diff --git a/fs/buffer.c b/fs/buffer.c
57778index 27265a8..289f488 100644
57779--- a/fs/buffer.c
57780+++ b/fs/buffer.c
57781@@ -3428,7 +3428,7 @@ void __init buffer_init(void)
57782 bh_cachep = kmem_cache_create("buffer_head",
57783 sizeof(struct buffer_head), 0,
57784 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
57785- SLAB_MEM_SPREAD),
57786+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
57787 NULL);
57788
57789 /*
57790diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
57791index 622f469..e8d2d55 100644
57792--- a/fs/cachefiles/bind.c
57793+++ b/fs/cachefiles/bind.c
57794@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
57795 args);
57796
57797 /* start by checking things over */
57798- ASSERT(cache->fstop_percent >= 0 &&
57799- cache->fstop_percent < cache->fcull_percent &&
57800+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
57801 cache->fcull_percent < cache->frun_percent &&
57802 cache->frun_percent < 100);
57803
57804- ASSERT(cache->bstop_percent >= 0 &&
57805- cache->bstop_percent < cache->bcull_percent &&
57806+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
57807 cache->bcull_percent < cache->brun_percent &&
57808 cache->brun_percent < 100);
57809
57810diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
57811index 0a1467b..6a53245 100644
57812--- a/fs/cachefiles/daemon.c
57813+++ b/fs/cachefiles/daemon.c
57814@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
57815 if (n > buflen)
57816 return -EMSGSIZE;
57817
57818- if (copy_to_user(_buffer, buffer, n) != 0)
57819+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
57820 return -EFAULT;
57821
57822 return n;
57823@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
57824 if (test_bit(CACHEFILES_DEAD, &cache->flags))
57825 return -EIO;
57826
57827- if (datalen < 0 || datalen > PAGE_SIZE - 1)
57828+ if (datalen > PAGE_SIZE - 1)
57829 return -EOPNOTSUPP;
57830
57831 /* drag the command string into the kernel so we can parse it */
57832@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
57833 if (args[0] != '%' || args[1] != '\0')
57834 return -EINVAL;
57835
57836- if (fstop < 0 || fstop >= cache->fcull_percent)
57837+ if (fstop >= cache->fcull_percent)
57838 return cachefiles_daemon_range_error(cache, args);
57839
57840 cache->fstop_percent = fstop;
57841@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
57842 if (args[0] != '%' || args[1] != '\0')
57843 return -EINVAL;
57844
57845- if (bstop < 0 || bstop >= cache->bcull_percent)
57846+ if (bstop >= cache->bcull_percent)
57847 return cachefiles_daemon_range_error(cache, args);
57848
57849 cache->bstop_percent = bstop;
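
Two related cleanups in cachefiles: the daemon_read path now also bounds n against the kernel-side buffer before copy_to_user(), and the dropped "< 0" comparisons were tautologies once datalen and the percentage fields became unsigned. The signedness pitfall in miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned int datalen = 0;

        /* With an unsigned type this comparison is always false; compilers
         * warn about it, and it rejects nothing at runtime. */
        if (datalen < 0)
            puts("never reached");

        /* the only meaningful bound left is the upper one */
        if (datalen > 4096 - 1)
            puts("too long");
        return 0;
    }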
57850diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
57851index 5349473..d6c0b93 100644
57852--- a/fs/cachefiles/internal.h
57853+++ b/fs/cachefiles/internal.h
57854@@ -59,7 +59,7 @@ struct cachefiles_cache {
57855 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
57856 struct rb_root active_nodes; /* active nodes (can't be culled) */
57857 rwlock_t active_lock; /* lock for active_nodes */
57858- atomic_t gravecounter; /* graveyard uniquifier */
57859+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
57860 unsigned frun_percent; /* when to stop culling (% files) */
57861 unsigned fcull_percent; /* when to start culling (% files) */
57862 unsigned fstop_percent; /* when to stop allocating (% files) */
57863@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
57864 * proc.c
57865 */
57866 #ifdef CONFIG_CACHEFILES_HISTOGRAM
57867-extern atomic_t cachefiles_lookup_histogram[HZ];
57868-extern atomic_t cachefiles_mkdir_histogram[HZ];
57869-extern atomic_t cachefiles_create_histogram[HZ];
57870+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57871+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57872+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
57873
57874 extern int __init cachefiles_proc_init(void);
57875 extern void cachefiles_proc_cleanup(void);
57876 static inline
57877-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
57878+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
57879 {
57880 unsigned long jif = jiffies - start_jif;
57881 if (jif >= HZ)
57882 jif = HZ - 1;
57883- atomic_inc(&histogram[jif]);
57884+ atomic_inc_unchecked(&histogram[jif]);
57885 }
57886
57887 #else
57888diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
57889index ca65f39..48921e3 100644
57890--- a/fs/cachefiles/namei.c
57891+++ b/fs/cachefiles/namei.c
57892@@ -317,7 +317,7 @@ try_again:
57893 /* first step is to make up a grave dentry in the graveyard */
57894 sprintf(nbuffer, "%08x%08x",
57895 (uint32_t) get_seconds(),
57896- (uint32_t) atomic_inc_return(&cache->gravecounter));
57897+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
57898
57899 /* do the multiway lock magic */
57900 trap = lock_rename(cache->graveyard, dir);
57901diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
57902index eccd339..4c1d995 100644
57903--- a/fs/cachefiles/proc.c
57904+++ b/fs/cachefiles/proc.c
57905@@ -14,9 +14,9 @@
57906 #include <linux/seq_file.h>
57907 #include "internal.h"
57908
57909-atomic_t cachefiles_lookup_histogram[HZ];
57910-atomic_t cachefiles_mkdir_histogram[HZ];
57911-atomic_t cachefiles_create_histogram[HZ];
57912+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57913+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57914+atomic_unchecked_t cachefiles_create_histogram[HZ];
57915
57916 /*
57917 * display the latency histogram
57918@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
57919 return 0;
57920 default:
57921 index = (unsigned long) v - 3;
57922- x = atomic_read(&cachefiles_lookup_histogram[index]);
57923- y = atomic_read(&cachefiles_mkdir_histogram[index]);
57924- z = atomic_read(&cachefiles_create_histogram[index]);
57925+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
57926+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
57927+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
57928 if (x == 0 && y == 0 && z == 0)
57929 return 0;
57930
57931diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
57932index ebaff36..7e3ea26 100644
57933--- a/fs/cachefiles/rdwr.c
57934+++ b/fs/cachefiles/rdwr.c
57935@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
57936 old_fs = get_fs();
57937 set_fs(KERNEL_DS);
57938 ret = file->f_op->write(
57939- file, (const void __user *) data, len, &pos);
57940+ file, (const void __force_user *) data, len, &pos);
57941 set_fs(old_fs);
57942 kunmap(page);
57943 file_end_write(file);
57944diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
57945index 5e0982a..b7e82bc 100644
57946--- a/fs/ceph/dir.c
57947+++ b/fs/ceph/dir.c
57948@@ -248,7 +248,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
57949 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
57950 struct ceph_mds_client *mdsc = fsc->mdsc;
57951 unsigned frag = fpos_frag(ctx->pos);
57952- int off = fpos_off(ctx->pos);
57953+ unsigned int off = fpos_off(ctx->pos);
57954 int err;
57955 u32 ftype;
57956 struct ceph_mds_reply_info_parsed *rinfo;
57957diff --git a/fs/ceph/super.c b/fs/ceph/super.c
57958index 10a4ccb..92dbc5e 100644
57959--- a/fs/ceph/super.c
57960+++ b/fs/ceph/super.c
57961@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
57962 /*
57963 * construct our own bdi so we can control readahead, etc.
57964 */
57965-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
57966+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
57967
57968 static int ceph_register_bdi(struct super_block *sb,
57969 struct ceph_fs_client *fsc)
57970@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
57971 default_backing_dev_info.ra_pages;
57972
57973 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
57974- atomic_long_inc_return(&bdi_seq));
57975+ atomic_long_inc_return_unchecked(&bdi_seq));
57976 if (!err)
57977 sb->s_bdi = &fsc->backing_dev_info;
57978 return err;
57979diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
57980index f3ac415..3d2420c 100644
57981--- a/fs/cifs/cifs_debug.c
57982+++ b/fs/cifs/cifs_debug.c
57983@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57984
57985 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
57986 #ifdef CONFIG_CIFS_STATS2
57987- atomic_set(&totBufAllocCount, 0);
57988- atomic_set(&totSmBufAllocCount, 0);
57989+ atomic_set_unchecked(&totBufAllocCount, 0);
57990+ atomic_set_unchecked(&totSmBufAllocCount, 0);
57991 #endif /* CONFIG_CIFS_STATS2 */
57992 spin_lock(&cifs_tcp_ses_lock);
57993 list_for_each(tmp1, &cifs_tcp_ses_list) {
57994@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57995 tcon = list_entry(tmp3,
57996 struct cifs_tcon,
57997 tcon_list);
57998- atomic_set(&tcon->num_smbs_sent, 0);
57999+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
58000 if (server->ops->clear_stats)
58001 server->ops->clear_stats(tcon);
58002 }
58003@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
58004 smBufAllocCount.counter, cifs_min_small);
58005 #ifdef CONFIG_CIFS_STATS2
58006 seq_printf(m, "Total Large %d Small %d Allocations\n",
58007- atomic_read(&totBufAllocCount),
58008- atomic_read(&totSmBufAllocCount));
58009+ atomic_read_unchecked(&totBufAllocCount),
58010+ atomic_read_unchecked(&totSmBufAllocCount));
58011 #endif /* CONFIG_CIFS_STATS2 */
58012
58013 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
58014@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
58015 if (tcon->need_reconnect)
58016 seq_puts(m, "\tDISCONNECTED ");
58017 seq_printf(m, "\nSMBs: %d",
58018- atomic_read(&tcon->num_smbs_sent));
58019+ atomic_read_unchecked(&tcon->num_smbs_sent));
58020 if (server->ops->print_stats)
58021 server->ops->print_stats(m, tcon);
58022 }
58023diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
58024index 7c6b73c..a8f0db2 100644
58025--- a/fs/cifs/cifsfs.c
58026+++ b/fs/cifs/cifsfs.c
58027@@ -1068,7 +1068,7 @@ cifs_init_request_bufs(void)
58028 */
58029 cifs_req_cachep = kmem_cache_create("cifs_request",
58030 CIFSMaxBufSize + max_hdr_size, 0,
58031- SLAB_HWCACHE_ALIGN, NULL);
58032+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
58033 if (cifs_req_cachep == NULL)
58034 return -ENOMEM;
58035
58036@@ -1095,7 +1095,7 @@ cifs_init_request_bufs(void)
58037 efficient to alloc 1 per page off the slab compared to 17K (5page)
58038 alloc of large cifs buffers even when page debugging is on */
58039 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
58040- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
58041+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
58042 NULL);
58043 if (cifs_sm_req_cachep == NULL) {
58044 mempool_destroy(cifs_req_poolp);
58045@@ -1180,8 +1180,8 @@ init_cifs(void)
58046 atomic_set(&bufAllocCount, 0);
58047 atomic_set(&smBufAllocCount, 0);
58048 #ifdef CONFIG_CIFS_STATS2
58049- atomic_set(&totBufAllocCount, 0);
58050- atomic_set(&totSmBufAllocCount, 0);
58051+ atomic_set_unchecked(&totBufAllocCount, 0);
58052+ atomic_set_unchecked(&totSmBufAllocCount, 0);
58053 #endif /* CONFIG_CIFS_STATS2 */
58054
58055 atomic_set(&midCount, 0);
58056diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
58057index 30f6e92..e915ba5 100644
58058--- a/fs/cifs/cifsglob.h
58059+++ b/fs/cifs/cifsglob.h
58060@@ -806,35 +806,35 @@ struct cifs_tcon {
58061 __u16 Flags; /* optional support bits */
58062 enum statusEnum tidStatus;
58063 #ifdef CONFIG_CIFS_STATS
58064- atomic_t num_smbs_sent;
58065+ atomic_unchecked_t num_smbs_sent;
58066 union {
58067 struct {
58068- atomic_t num_writes;
58069- atomic_t num_reads;
58070- atomic_t num_flushes;
58071- atomic_t num_oplock_brks;
58072- atomic_t num_opens;
58073- atomic_t num_closes;
58074- atomic_t num_deletes;
58075- atomic_t num_mkdirs;
58076- atomic_t num_posixopens;
58077- atomic_t num_posixmkdirs;
58078- atomic_t num_rmdirs;
58079- atomic_t num_renames;
58080- atomic_t num_t2renames;
58081- atomic_t num_ffirst;
58082- atomic_t num_fnext;
58083- atomic_t num_fclose;
58084- atomic_t num_hardlinks;
58085- atomic_t num_symlinks;
58086- atomic_t num_locks;
58087- atomic_t num_acl_get;
58088- atomic_t num_acl_set;
58089+ atomic_unchecked_t num_writes;
58090+ atomic_unchecked_t num_reads;
58091+ atomic_unchecked_t num_flushes;
58092+ atomic_unchecked_t num_oplock_brks;
58093+ atomic_unchecked_t num_opens;
58094+ atomic_unchecked_t num_closes;
58095+ atomic_unchecked_t num_deletes;
58096+ atomic_unchecked_t num_mkdirs;
58097+ atomic_unchecked_t num_posixopens;
58098+ atomic_unchecked_t num_posixmkdirs;
58099+ atomic_unchecked_t num_rmdirs;
58100+ atomic_unchecked_t num_renames;
58101+ atomic_unchecked_t num_t2renames;
58102+ atomic_unchecked_t num_ffirst;
58103+ atomic_unchecked_t num_fnext;
58104+ atomic_unchecked_t num_fclose;
58105+ atomic_unchecked_t num_hardlinks;
58106+ atomic_unchecked_t num_symlinks;
58107+ atomic_unchecked_t num_locks;
58108+ atomic_unchecked_t num_acl_get;
58109+ atomic_unchecked_t num_acl_set;
58110 } cifs_stats;
58111 #ifdef CONFIG_CIFS_SMB2
58112 struct {
58113- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
58114- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
58115+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
58116+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
58117 } smb2_stats;
58118 #endif /* CONFIG_CIFS_SMB2 */
58119 } stats;
58120@@ -1170,7 +1170,7 @@ convert_delimiter(char *path, char delim)
58121 }
58122
58123 #ifdef CONFIG_CIFS_STATS
58124-#define cifs_stats_inc atomic_inc
58125+#define cifs_stats_inc atomic_inc_unchecked
58126
58127 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
58128 unsigned int bytes)
58129@@ -1536,8 +1536,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
58130 /* Various Debug counters */
58131 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
58132 #ifdef CONFIG_CIFS_STATS2
58133-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
58134-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
58135+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
58136+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
58137 #endif
58138 GLOBAL_EXTERN atomic_t smBufAllocCount;
58139 GLOBAL_EXTERN atomic_t midCount;
58140diff --git a/fs/cifs/file.c b/fs/cifs/file.c
58141index 87c4dd0..a90f115 100644
58142--- a/fs/cifs/file.c
58143+++ b/fs/cifs/file.c
58144@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
58145 index = mapping->writeback_index; /* Start from prev offset */
58146 end = -1;
58147 } else {
58148- index = wbc->range_start >> PAGE_CACHE_SHIFT;
58149- end = wbc->range_end >> PAGE_CACHE_SHIFT;
58150- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
58151+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
58152 range_whole = true;
58153+ index = 0;
58154+ end = ULONG_MAX;
58155+ } else {
58156+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
58157+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
58158+ }
58159 scanned = true;
58160 }
58161 retry:
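
The rewritten branch gives the whole-file case (range_start == 0, range_end == LLONG_MAX) its own explicit index = 0 and end = ULONG_MAX, rather than deriving end by shifting LLONG_MAX into a page index, which can truncate. The reasoning below is an inference from the hunk, shown with an assumed 4 KiB page size:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        long long range_end = LLONG_MAX;     /* "write back everything" */

        /* Shifting the byte offset down to a page index can truncate once
         * it is stored in an unsigned long (notably on 32-bit hosts); the
         * hunk above sets end = ULONG_MAX explicitly instead. */
        unsigned long shifted     = (unsigned long)(range_end >> 12);
        unsigned long whole_range = ULONG_MAX;

        printf("shifted=%#lx explicit=%#lx\n", shifted, whole_range);
        return 0;
    }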
58162diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
58163index 3b0c62e..f7d090c 100644
58164--- a/fs/cifs/misc.c
58165+++ b/fs/cifs/misc.c
58166@@ -170,7 +170,7 @@ cifs_buf_get(void)
58167 memset(ret_buf, 0, buf_size + 3);
58168 atomic_inc(&bufAllocCount);
58169 #ifdef CONFIG_CIFS_STATS2
58170- atomic_inc(&totBufAllocCount);
58171+ atomic_inc_unchecked(&totBufAllocCount);
58172 #endif /* CONFIG_CIFS_STATS2 */
58173 }
58174
58175@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
58176 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
58177 atomic_inc(&smBufAllocCount);
58178 #ifdef CONFIG_CIFS_STATS2
58179- atomic_inc(&totSmBufAllocCount);
58180+ atomic_inc_unchecked(&totSmBufAllocCount);
58181 #endif /* CONFIG_CIFS_STATS2 */
58182
58183 }
58184diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
58185index d1fdfa8..94558f8 100644
58186--- a/fs/cifs/smb1ops.c
58187+++ b/fs/cifs/smb1ops.c
58188@@ -626,27 +626,27 @@ static void
58189 cifs_clear_stats(struct cifs_tcon *tcon)
58190 {
58191 #ifdef CONFIG_CIFS_STATS
58192- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
58193- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
58194- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
58195- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
58196- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
58197- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
58198- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
58199- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
58200- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
58201- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
58202- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
58203- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
58204- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
58205- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
58206- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
58207- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
58208- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
58209- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
58210- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
58211- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
58212- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
58213+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
58214+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
58215+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
58216+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
58217+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
58218+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
58219+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
58220+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
58221+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
58222+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
58223+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
58224+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
58225+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
58226+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
58227+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
58228+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
58229+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
58230+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
58231+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
58232+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
58233+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
58234 #endif
58235 }
58236
58237@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
58238 {
58239 #ifdef CONFIG_CIFS_STATS
58240 seq_printf(m, " Oplocks breaks: %d",
58241- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
58242+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
58243 seq_printf(m, "\nReads: %d Bytes: %llu",
58244- atomic_read(&tcon->stats.cifs_stats.num_reads),
58245+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
58246 (long long)(tcon->bytes_read));
58247 seq_printf(m, "\nWrites: %d Bytes: %llu",
58248- atomic_read(&tcon->stats.cifs_stats.num_writes),
58249+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
58250 (long long)(tcon->bytes_written));
58251 seq_printf(m, "\nFlushes: %d",
58252- atomic_read(&tcon->stats.cifs_stats.num_flushes));
58253+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
58254 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
58255- atomic_read(&tcon->stats.cifs_stats.num_locks),
58256- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
58257- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
58258+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
58259+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
58260+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
58261 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
58262- atomic_read(&tcon->stats.cifs_stats.num_opens),
58263- atomic_read(&tcon->stats.cifs_stats.num_closes),
58264- atomic_read(&tcon->stats.cifs_stats.num_deletes));
58265+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
58266+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
58267+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
58268 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
58269- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
58270- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
58271+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
58272+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
58273 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
58274- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
58275- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
58276+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
58277+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
58278 seq_printf(m, "\nRenames: %d T2 Renames %d",
58279- atomic_read(&tcon->stats.cifs_stats.num_renames),
58280- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
58281+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
58282+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
58283 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
58284- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
58285- atomic_read(&tcon->stats.cifs_stats.num_fnext),
58286- atomic_read(&tcon->stats.cifs_stats.num_fclose));
58287+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
58288+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
58289+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
58290 #endif
58291 }
58292
58293diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
58294index 35ddc3e..563e809 100644
58295--- a/fs/cifs/smb2ops.c
58296+++ b/fs/cifs/smb2ops.c
58297@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
58298 #ifdef CONFIG_CIFS_STATS
58299 int i;
58300 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
58301- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
58302- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
58303+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
58304+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
58305 }
58306 #endif
58307 }
58308@@ -405,65 +405,65 @@ static void
58309 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
58310 {
58311 #ifdef CONFIG_CIFS_STATS
58312- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
58313- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
58314+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
58315+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
58316 seq_printf(m, "\nNegotiates: %d sent %d failed",
58317- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
58318- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
58319+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
58320+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
58321 seq_printf(m, "\nSessionSetups: %d sent %d failed",
58322- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
58323- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
58324+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
58325+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
58326 seq_printf(m, "\nLogoffs: %d sent %d failed",
58327- atomic_read(&sent[SMB2_LOGOFF_HE]),
58328- atomic_read(&failed[SMB2_LOGOFF_HE]));
58329+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
58330+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
58331 seq_printf(m, "\nTreeConnects: %d sent %d failed",
58332- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
58333- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
58334+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
58335+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
58336 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
58337- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
58338- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
58339+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
58340+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
58341 seq_printf(m, "\nCreates: %d sent %d failed",
58342- atomic_read(&sent[SMB2_CREATE_HE]),
58343- atomic_read(&failed[SMB2_CREATE_HE]));
58344+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
58345+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
58346 seq_printf(m, "\nCloses: %d sent %d failed",
58347- atomic_read(&sent[SMB2_CLOSE_HE]),
58348- atomic_read(&failed[SMB2_CLOSE_HE]));
58349+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
58350+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
58351 seq_printf(m, "\nFlushes: %d sent %d failed",
58352- atomic_read(&sent[SMB2_FLUSH_HE]),
58353- atomic_read(&failed[SMB2_FLUSH_HE]));
58354+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
58355+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
58356 seq_printf(m, "\nReads: %d sent %d failed",
58357- atomic_read(&sent[SMB2_READ_HE]),
58358- atomic_read(&failed[SMB2_READ_HE]));
58359+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
58360+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
58361 seq_printf(m, "\nWrites: %d sent %d failed",
58362- atomic_read(&sent[SMB2_WRITE_HE]),
58363- atomic_read(&failed[SMB2_WRITE_HE]));
58364+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
58365+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
58366 seq_printf(m, "\nLocks: %d sent %d failed",
58367- atomic_read(&sent[SMB2_LOCK_HE]),
58368- atomic_read(&failed[SMB2_LOCK_HE]));
58369+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
58370+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
58371 seq_printf(m, "\nIOCTLs: %d sent %d failed",
58372- atomic_read(&sent[SMB2_IOCTL_HE]),
58373- atomic_read(&failed[SMB2_IOCTL_HE]));
58374+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
58375+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
58376 seq_printf(m, "\nCancels: %d sent %d failed",
58377- atomic_read(&sent[SMB2_CANCEL_HE]),
58378- atomic_read(&failed[SMB2_CANCEL_HE]));
58379+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
58380+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
58381 seq_printf(m, "\nEchos: %d sent %d failed",
58382- atomic_read(&sent[SMB2_ECHO_HE]),
58383- atomic_read(&failed[SMB2_ECHO_HE]));
58384+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
58385+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
58386 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
58387- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
58388- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
58389+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
58390+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
58391 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
58392- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
58393- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
58394+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
58395+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
58396 seq_printf(m, "\nQueryInfos: %d sent %d failed",
58397- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
58398- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
58399+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
58400+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
58401 seq_printf(m, "\nSetInfos: %d sent %d failed",
58402- atomic_read(&sent[SMB2_SET_INFO_HE]),
58403- atomic_read(&failed[SMB2_SET_INFO_HE]));
58404+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
58405+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
58406 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
58407- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
58408- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
58409+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
58410+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
58411 #endif
58412 }
58413
58414diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
58415index 8603447..f9caeee 100644
58416--- a/fs/cifs/smb2pdu.c
58417+++ b/fs/cifs/smb2pdu.c
58418@@ -2094,8 +2094,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
58419 default:
58420 cifs_dbg(VFS, "info level %u isn't supported\n",
58421 srch_inf->info_level);
58422- rc = -EINVAL;
58423- goto qdir_exit;
58424+ return -EINVAL;
58425 }
58426
58427 req->FileIndex = cpu_to_le32(index);
58428diff --git a/fs/coda/cache.c b/fs/coda/cache.c
58429index 1da168c..8bc7ff6 100644
58430--- a/fs/coda/cache.c
58431+++ b/fs/coda/cache.c
58432@@ -24,7 +24,7 @@
58433 #include "coda_linux.h"
58434 #include "coda_cache.h"
58435
58436-static atomic_t permission_epoch = ATOMIC_INIT(0);
58437+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
58438
58439 /* replace or extend an acl cache hit */
58440 void coda_cache_enter(struct inode *inode, int mask)
58441@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
58442 struct coda_inode_info *cii = ITOC(inode);
58443
58444 spin_lock(&cii->c_lock);
58445- cii->c_cached_epoch = atomic_read(&permission_epoch);
58446+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
58447 if (!uid_eq(cii->c_uid, current_fsuid())) {
58448 cii->c_uid = current_fsuid();
58449 cii->c_cached_perm = mask;
58450@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
58451 {
58452 struct coda_inode_info *cii = ITOC(inode);
58453 spin_lock(&cii->c_lock);
58454- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
58455+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
58456 spin_unlock(&cii->c_lock);
58457 }
58458
58459 /* remove all acl caches */
58460 void coda_cache_clear_all(struct super_block *sb)
58461 {
58462- atomic_inc(&permission_epoch);
58463+ atomic_inc_unchecked(&permission_epoch);
58464 }
58465
58466
58467@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
58468 spin_lock(&cii->c_lock);
58469 hit = (mask & cii->c_cached_perm) == mask &&
58470 uid_eq(cii->c_uid, current_fsuid()) &&
58471- cii->c_cached_epoch == atomic_read(&permission_epoch);
58472+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
58473 spin_unlock(&cii->c_lock);
58474
58475 return hit;
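
[annotation, not part of the patch] The atomic_unchecked_t conversions in the cifs and coda hunks above follow one pattern: counters that are allowed to wrap (protocol statistics, cache epochs) are moved off plain atomic_t so that the PAX_REFCOUNT overflow trap only guards real reference counts. A minimal userspace sketch of the distinction, using C11 atomics; the type and function names are illustrative, not kernel API.

#include <assert.h>
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_checked_t; /* overflow is a bug */
typedef struct { atomic_int counter; } atomic_wrap_t;    /* free-running stat */

static int checked_inc(atomic_checked_t *v)
{
	int old = atomic_fetch_add(&v->counter, 1);
	assert(old != INT_MAX); /* stand-in for the PaX overflow trap */
	return old + 1;
}

static void wrap_inc(atomic_wrap_t *v)
{
	/* wrapping is harmless: an epoch only needs to change, not grow;
	 * C11 defines silent two's-complement wraparound for atomics */
	atomic_fetch_add(&v->counter, 1);
}

int main(void)
{
	atomic_checked_t refs;
	atomic_wrap_t epoch;

	atomic_init(&refs.counter, 1);
	atomic_init(&epoch.counter, INT_MAX); /* about to wrap, by design */

	checked_inc(&refs);
	wrap_inc(&epoch);
	printf("refs=%d epoch=%d\n",
	       atomic_load(&refs.counter), atomic_load(&epoch.counter));
	return 0;
}
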
58476diff --git a/fs/compat.c b/fs/compat.c
58477index 6af20de..fec3fbb 100644
58478--- a/fs/compat.c
58479+++ b/fs/compat.c
58480@@ -54,7 +54,7 @@
58481 #include <asm/ioctls.h>
58482 #include "internal.h"
58483
58484-int compat_log = 1;
58485+int compat_log = 0;
58486
58487 int compat_printk(const char *fmt, ...)
58488 {
58489@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
58490
58491 set_fs(KERNEL_DS);
58492 /* The __user pointer cast is valid because of the set_fs() */
58493- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
58494+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
58495 set_fs(oldfs);
58496 /* truncating is ok because it's a user address */
58497 if (!ret)
58498@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
58499 goto out;
58500
58501 ret = -EINVAL;
58502- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
58503+ if (nr_segs > UIO_MAXIOV)
58504 goto out;
58505 if (nr_segs > fast_segs) {
58506 ret = -ENOMEM;
58507@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
58508 struct compat_readdir_callback {
58509 struct dir_context ctx;
58510 struct compat_old_linux_dirent __user *dirent;
58511+ struct file * file;
58512 int result;
58513 };
58514
58515@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
58516 buf->result = -EOVERFLOW;
58517 return -EOVERFLOW;
58518 }
58519+
58520+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58521+ return 0;
58522+
58523 buf->result++;
58524 dirent = buf->dirent;
58525 if (!access_ok(VERIFY_WRITE, dirent,
58526@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
58527 if (!f.file)
58528 return -EBADF;
58529
58530+ buf.file = f.file;
58531 error = iterate_dir(f.file, &buf.ctx);
58532 if (buf.result)
58533 error = buf.result;
58534@@ -901,6 +907,7 @@ struct compat_getdents_callback {
58535 struct dir_context ctx;
58536 struct compat_linux_dirent __user *current_dir;
58537 struct compat_linux_dirent __user *previous;
58538+ struct file * file;
58539 int count;
58540 int error;
58541 };
58542@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
58543 buf->error = -EOVERFLOW;
58544 return -EOVERFLOW;
58545 }
58546+
58547+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58548+ return 0;
58549+
58550 dirent = buf->previous;
58551 if (dirent) {
58552 if (__put_user(offset, &dirent->d_off))
58553@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
58554 if (!f.file)
58555 return -EBADF;
58556
58557+ buf.file = f.file;
58558 error = iterate_dir(f.file, &buf.ctx);
58559 if (error >= 0)
58560 error = buf.error;
58561@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
58562 struct dir_context ctx;
58563 struct linux_dirent64 __user *current_dir;
58564 struct linux_dirent64 __user *previous;
58565+ struct file * file;
58566 int count;
58567 int error;
58568 };
58569@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
58570 buf->error = -EINVAL; /* only used if we fail.. */
58571 if (reclen > buf->count)
58572 return -EINVAL;
58573+
58574+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
58575+ return 0;
58576+
58577 dirent = buf->previous;
58578
58579 if (dirent) {
58580@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
58581 if (!f.file)
58582 return -EBADF;
58583
58584+ buf.file = f.file;
58585 error = iterate_dir(f.file, &buf.ctx);
58586 if (error >= 0)
58587 error = buf.error;
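
[annotation, not part of the patch] Each fs/compat.c hunk above stashes the struct file in the readdir callback context so that a policy hook (gr_acl_handle_filldir) can veto individual directory entries as they are emitted. A hypothetical userspace analogue of the same "context plus filter" shape, built on POSIX opendir()/readdir(); policy_allows() is a made-up stand-in for the grsecurity hook.

#include <dirent.h>
#include <stdio.h>

struct listdir_ctx {
	const char *path; /* plays the role of the saved struct file */
	int shown;
};

/* stand-in for gr_acl_handle_filldir(): nonzero means "show entry" */
static int policy_allows(const struct listdir_ctx *ctx, const char *name)
{
	(void)ctx;
	return name[0] != '.'; /* illustrative policy: hide dotfiles */
}

static int fillonedir(struct listdir_ctx *ctx, const char *name)
{
	if (!policy_allows(ctx, name))
		return 0; /* entry silently skipped, as in the patch */
	printf("%s\n", name);
	ctx->shown++;
	return 0;
}

int main(void)
{
	struct listdir_ctx ctx = { ".", 0 };
	DIR *d = opendir(ctx.path);
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL)
		fillonedir(&ctx, de->d_name);
	closedir(d);
	fprintf(stderr, "%d entries shown\n", ctx.shown);
	return 0;
}
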
58588diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
58589index a81147e..20bf2b5 100644
58590--- a/fs/compat_binfmt_elf.c
58591+++ b/fs/compat_binfmt_elf.c
58592@@ -30,11 +30,13 @@
58593 #undef elf_phdr
58594 #undef elf_shdr
58595 #undef elf_note
58596+#undef elf_dyn
58597 #undef elf_addr_t
58598 #define elfhdr elf32_hdr
58599 #define elf_phdr elf32_phdr
58600 #define elf_shdr elf32_shdr
58601 #define elf_note elf32_note
58602+#define elf_dyn Elf32_Dyn
58603 #define elf_addr_t Elf32_Addr
58604
58605 /*
58606diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
58607index 3881610..ab3df0b 100644
58608--- a/fs/compat_ioctl.c
58609+++ b/fs/compat_ioctl.c
58610@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
58611 return -EFAULT;
58612 if (__get_user(udata, &ss32->iomem_base))
58613 return -EFAULT;
58614- ss.iomem_base = compat_ptr(udata);
58615+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
58616 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
58617 __get_user(ss.port_high, &ss32->port_high))
58618 return -EFAULT;
58619@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
58620 for (i = 0; i < nmsgs; i++) {
58621 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
58622 return -EFAULT;
58623- if (get_user(datap, &umsgs[i].buf) ||
58624- put_user(compat_ptr(datap), &tmsgs[i].buf))
58625+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
58626+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
58627 return -EFAULT;
58628 }
58629 return sys_ioctl(fd, cmd, (unsigned long)tdata);
58630@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
58631 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
58632 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
58633 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
58634- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
58635+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
58636 return -EFAULT;
58637
58638 return ioctl_preallocate(file, p);
58639@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
58640 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
58641 {
58642 unsigned int a, b;
58643- a = *(unsigned int *)p;
58644- b = *(unsigned int *)q;
58645+ a = *(const unsigned int *)p;
58646+ b = *(const unsigned int *)q;
58647 if (a > b)
58648 return 1;
58649 if (a < b)
58650diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
58651index e081acb..911df21 100644
58652--- a/fs/configfs/dir.c
58653+++ b/fs/configfs/dir.c
58654@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
58655 }
58656 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
58657 struct configfs_dirent *next;
58658- const char *name;
58659+ const unsigned char * name;
58660+ char d_name[sizeof(next->s_dentry->d_iname)];
58661 int len;
58662 struct inode *inode = NULL;
58663
58664@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
58665 continue;
58666
58667 name = configfs_get_name(next);
58668- len = strlen(name);
58669+ if (next->s_dentry && name == next->s_dentry->d_iname) {
58670+ len = next->s_dentry->d_name.len;
58671+ memcpy(d_name, name, len);
58672+ name = d_name;
58673+ } else
58674+ len = strlen(name);
58675
58676 /*
58677 * We'll have a dentry and an inode for
58678diff --git a/fs/coredump.c b/fs/coredump.c
58679index 0b2528f..836c55f 100644
58680--- a/fs/coredump.c
58681+++ b/fs/coredump.c
58682@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
58683 struct pipe_inode_info *pipe = file->private_data;
58684
58685 pipe_lock(pipe);
58686- pipe->readers++;
58687- pipe->writers--;
58688+ atomic_inc(&pipe->readers);
58689+ atomic_dec(&pipe->writers);
58690 wake_up_interruptible_sync(&pipe->wait);
58691 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
58692 pipe_unlock(pipe);
58693@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
58694 * We actually want wait_event_freezable() but then we need
58695 * to clear TIF_SIGPENDING and improve dump_interrupted().
58696 */
58697- wait_event_interruptible(pipe->wait, pipe->readers == 1);
58698+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
58699
58700 pipe_lock(pipe);
58701- pipe->readers--;
58702- pipe->writers++;
58703+ atomic_dec(&pipe->readers);
58704+ atomic_inc(&pipe->writers);
58705 pipe_unlock(pipe);
58706 }
58707
58708@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
58709 struct files_struct *displaced;
58710 bool need_nonrelative = false;
58711 bool core_dumped = false;
58712- static atomic_t core_dump_count = ATOMIC_INIT(0);
58713+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
58714+ long signr = siginfo->si_signo;
58715+ int dumpable;
58716 struct coredump_params cprm = {
58717 .siginfo = siginfo,
58718 .regs = signal_pt_regs(),
58719@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
58720 .mm_flags = mm->flags,
58721 };
58722
58723- audit_core_dumps(siginfo->si_signo);
58724+ audit_core_dumps(signr);
58725+
58726+ dumpable = __get_dumpable(cprm.mm_flags);
58727+
58728+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
58729+ gr_handle_brute_attach(dumpable);
58730
58731 binfmt = mm->binfmt;
58732 if (!binfmt || !binfmt->core_dump)
58733 goto fail;
58734- if (!__get_dumpable(cprm.mm_flags))
58735+ if (!dumpable)
58736 goto fail;
58737
58738 cred = prepare_creds();
58739@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
58740 need_nonrelative = true;
58741 }
58742
58743- retval = coredump_wait(siginfo->si_signo, &core_state);
58744+ retval = coredump_wait(signr, &core_state);
58745 if (retval < 0)
58746 goto fail_creds;
58747
58748@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
58749 }
58750 cprm.limit = RLIM_INFINITY;
58751
58752- dump_count = atomic_inc_return(&core_dump_count);
58753+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
58754 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
58755 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
58756 task_tgid_vnr(current), current->comm);
58757@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
58758 } else {
58759 struct inode *inode;
58760
58761+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
58762+
58763 if (cprm.limit < binfmt->min_coredump)
58764 goto fail_unlock;
58765
58766@@ -673,7 +682,7 @@ close_fail:
58767 filp_close(cprm.file, NULL);
58768 fail_dropcount:
58769 if (ispipe)
58770- atomic_dec(&core_dump_count);
58771+ atomic_dec_unchecked(&core_dump_count);
58772 fail_unlock:
58773 kfree(cn.corename);
58774 coredump_finish(mm, core_dumped);
58775@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
58776 struct file *file = cprm->file;
58777 loff_t pos = file->f_pos;
58778 ssize_t n;
58779+
58780+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
58781 if (cprm->written + nr > cprm->limit)
58782 return 0;
58783 while (nr) {
58784diff --git a/fs/dcache.c b/fs/dcache.c
58785index 7f3b400..9c911f2 100644
58786--- a/fs/dcache.c
58787+++ b/fs/dcache.c
58788@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
58789 */
58790 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
58791 if (name->len > DNAME_INLINE_LEN-1) {
58792- dname = kmalloc(name->len + 1, GFP_KERNEL);
58793+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
58794 if (!dname) {
58795 kmem_cache_free(dentry_cache, dentry);
58796 return NULL;
58797@@ -3430,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
58798 mempages -= reserve;
58799
58800 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
58801- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
58802+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
58803+ SLAB_NO_SANITIZE, NULL);
58804
58805 dcache_init();
58806 inode_init();
58807diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
58808index 9c0444c..628490c 100644
58809--- a/fs/debugfs/inode.c
58810+++ b/fs/debugfs/inode.c
58811@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
58812 */
58813 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
58814 {
58815+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
58816+ return __create_file(name, S_IFDIR | S_IRWXU,
58817+#else
58818 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
58819+#endif
58820 parent, NULL, NULL);
58821 }
58822 EXPORT_SYMBOL_GPL(debugfs_create_dir);
58823diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
58824index b167ca4..a224e19 100644
58825--- a/fs/ecryptfs/inode.c
58826+++ b/fs/ecryptfs/inode.c
58827@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
58828 old_fs = get_fs();
58829 set_fs(get_ds());
58830 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
58831- (char __user *)lower_buf,
58832+ (char __force_user *)lower_buf,
58833 PATH_MAX);
58834 set_fs(old_fs);
58835 if (rc < 0)
58836diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
58837index e4141f2..d8263e8 100644
58838--- a/fs/ecryptfs/miscdev.c
58839+++ b/fs/ecryptfs/miscdev.c
58840@@ -304,7 +304,7 @@ check_list:
58841 goto out_unlock_msg_ctx;
58842 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
58843 if (msg_ctx->msg) {
58844- if (copy_to_user(&buf[i], packet_length, packet_length_size))
58845+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
58846 goto out_unlock_msg_ctx;
58847 i += packet_length_size;
58848 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
58849diff --git a/fs/exec.c b/fs/exec.c
58850index 31e46b1..f5c70a3 100644
58851--- a/fs/exec.c
58852+++ b/fs/exec.c
58853@@ -55,8 +55,20 @@
58854 #include <linux/pipe_fs_i.h>
58855 #include <linux/oom.h>
58856 #include <linux/compat.h>
58857+#include <linux/random.h>
58858+#include <linux/seq_file.h>
58859+#include <linux/coredump.h>
58860+#include <linux/mman.h>
58861+
58862+#ifdef CONFIG_PAX_REFCOUNT
58863+#include <linux/kallsyms.h>
58864+#include <linux/kdebug.h>
58865+#endif
58866+
58867+#include <trace/events/fs.h>
58868
58869 #include <asm/uaccess.h>
58870+#include <asm/sections.h>
58871 #include <asm/mmu_context.h>
58872 #include <asm/tlb.h>
58873
58874@@ -65,19 +77,34 @@
58875
58876 #include <trace/events/sched.h>
58877
58878+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58879+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
58880+{
58881+	pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback; this is probably not what you wanted.\n");
58882+}
58883+#endif
58884+
58885+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
58886+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
58887+EXPORT_SYMBOL(pax_set_initial_flags_func);
58888+#endif
58889+
58890 int suid_dumpable = 0;
58891
58892 static LIST_HEAD(formats);
58893 static DEFINE_RWLOCK(binfmt_lock);
58894
58895+extern int gr_process_kernel_exec_ban(void);
58896+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
58897+
58898 void __register_binfmt(struct linux_binfmt * fmt, int insert)
58899 {
58900 BUG_ON(!fmt);
58901 if (WARN_ON(!fmt->load_binary))
58902 return;
58903 write_lock(&binfmt_lock);
58904- insert ? list_add(&fmt->lh, &formats) :
58905- list_add_tail(&fmt->lh, &formats);
58906+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
58907+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
58908 write_unlock(&binfmt_lock);
58909 }
58910
58911@@ -86,7 +113,7 @@ EXPORT_SYMBOL(__register_binfmt);
58912 void unregister_binfmt(struct linux_binfmt * fmt)
58913 {
58914 write_lock(&binfmt_lock);
58915- list_del(&fmt->lh);
58916+ pax_list_del((struct list_head *)&fmt->lh);
58917 write_unlock(&binfmt_lock);
58918 }
58919
58920@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58921 int write)
58922 {
58923 struct page *page;
58924- int ret;
58925
58926-#ifdef CONFIG_STACK_GROWSUP
58927- if (write) {
58928- ret = expand_downwards(bprm->vma, pos);
58929- if (ret < 0)
58930- return NULL;
58931- }
58932-#endif
58933- ret = get_user_pages(current, bprm->mm, pos,
58934- 1, write, 1, &page, NULL);
58935- if (ret <= 0)
58936+ if (0 > expand_downwards(bprm->vma, pos))
58937+ return NULL;
58938+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
58939 return NULL;
58940
58941 if (write) {
58942@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58943 if (size <= ARG_MAX)
58944 return page;
58945
58946+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58947+ // only allow 512KB for argv+env on suid/sgid binaries
58948+ // to prevent easy ASLR exhaustion
58949+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
58950+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
58951+ (size > (512 * 1024))) {
58952+ put_page(page);
58953+ return NULL;
58954+ }
58955+#endif
58956+
58957 /*
58958 * Limit to 1/4-th the stack size for the argv+env strings.
58959 * This ensures that:
58960@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58961 vma->vm_end = STACK_TOP_MAX;
58962 vma->vm_start = vma->vm_end - PAGE_SIZE;
58963 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
58964+
58965+#ifdef CONFIG_PAX_SEGMEXEC
58966+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
58967+#endif
58968+
58969 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
58970 INIT_LIST_HEAD(&vma->anon_vma_chain);
58971
58972@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58973 mm->stack_vm = mm->total_vm = 1;
58974 up_write(&mm->mmap_sem);
58975 bprm->p = vma->vm_end - sizeof(void *);
58976+
58977+#ifdef CONFIG_PAX_RANDUSTACK
58978+ if (randomize_va_space)
58979+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
58980+#endif
58981+
58982 return 0;
58983 err:
58984 up_write(&mm->mmap_sem);
58985@@ -396,7 +437,7 @@ struct user_arg_ptr {
58986 } ptr;
58987 };
58988
58989-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58990+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58991 {
58992 const char __user *native;
58993
58994@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58995 compat_uptr_t compat;
58996
58997 if (get_user(compat, argv.ptr.compat + nr))
58998- return ERR_PTR(-EFAULT);
58999+ return (const char __force_user *)ERR_PTR(-EFAULT);
59000
59001 return compat_ptr(compat);
59002 }
59003 #endif
59004
59005 if (get_user(native, argv.ptr.native + nr))
59006- return ERR_PTR(-EFAULT);
59007+ return (const char __force_user *)ERR_PTR(-EFAULT);
59008
59009 return native;
59010 }
59011@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
59012 if (!p)
59013 break;
59014
59015- if (IS_ERR(p))
59016+ if (IS_ERR((const char __force_kernel *)p))
59017 return -EFAULT;
59018
59019 if (i >= max)
59020@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
59021
59022 ret = -EFAULT;
59023 str = get_user_arg_ptr(argv, argc);
59024- if (IS_ERR(str))
59025+ if (IS_ERR((const char __force_kernel *)str))
59026 goto out;
59027
59028 len = strnlen_user(str, MAX_ARG_STRLEN);
59029@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
59030 int r;
59031 mm_segment_t oldfs = get_fs();
59032 struct user_arg_ptr argv = {
59033- .ptr.native = (const char __user *const __user *)__argv,
59034+ .ptr.native = (const char __user * const __force_user *)__argv,
59035 };
59036
59037 set_fs(KERNEL_DS);
59038@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
59039 unsigned long new_end = old_end - shift;
59040 struct mmu_gather tlb;
59041
59042- BUG_ON(new_start > new_end);
59043+ if (new_start >= new_end || new_start < mmap_min_addr)
59044+ return -ENOMEM;
59045
59046 /*
59047 * ensure there are no vmas between where we want to go
59048@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
59049 if (vma != find_vma(mm, new_start))
59050 return -EFAULT;
59051
59052+#ifdef CONFIG_PAX_SEGMEXEC
59053+ BUG_ON(pax_find_mirror_vma(vma));
59054+#endif
59055+
59056 /*
59057 * cover the whole range: [new_start, old_end)
59058 */
59059@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
59060 stack_top = arch_align_stack(stack_top);
59061 stack_top = PAGE_ALIGN(stack_top);
59062
59063- if (unlikely(stack_top < mmap_min_addr) ||
59064- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
59065- return -ENOMEM;
59066-
59067 stack_shift = vma->vm_end - stack_top;
59068
59069 bprm->p -= stack_shift;
59070@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
59071 bprm->exec -= stack_shift;
59072
59073 down_write(&mm->mmap_sem);
59074+
59075+ /* Move stack pages down in memory. */
59076+ if (stack_shift) {
59077+ ret = shift_arg_pages(vma, stack_shift);
59078+ if (ret)
59079+ goto out_unlock;
59080+ }
59081+
59082 vm_flags = VM_STACK_FLAGS;
59083
59084+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59085+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59086+ vm_flags &= ~VM_EXEC;
59087+
59088+#ifdef CONFIG_PAX_MPROTECT
59089+ if (mm->pax_flags & MF_PAX_MPROTECT)
59090+ vm_flags &= ~VM_MAYEXEC;
59091+#endif
59092+
59093+ }
59094+#endif
59095+
59096 /*
59097 * Adjust stack execute permissions; explicitly enable for
59098 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
59099@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
59100 goto out_unlock;
59101 BUG_ON(prev != vma);
59102
59103- /* Move stack pages down in memory. */
59104- if (stack_shift) {
59105- ret = shift_arg_pages(vma, stack_shift);
59106- if (ret)
59107- goto out_unlock;
59108- }
59109-
59110 /* mprotect_fixup is overkill to remove the temporary stack flags */
59111 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
59112
59113@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
59114 #endif
59115 current->mm->start_stack = bprm->p;
59116 ret = expand_stack(vma, stack_base);
59117+
59118+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
59119+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
59120+ unsigned long size;
59121+ vm_flags_t vm_flags;
59122+
59123+ size = STACK_TOP - vma->vm_end;
59124+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
59125+
59126+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
59127+
59128+#ifdef CONFIG_X86
59129+ if (!ret) {
59130+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
59131+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
59132+ }
59133+#endif
59134+
59135+ }
59136+#endif
59137+
59138 if (ret)
59139 ret = -EFAULT;
59140
59141@@ -772,6 +848,8 @@ static struct file *do_open_exec(struct filename *name)
59142
59143 fsnotify_open(file);
59144
59145+ trace_open_exec(name->name);
59146+
59147 err = deny_write_access(file);
59148 if (err)
59149 goto exit;
59150@@ -801,7 +879,7 @@ int kernel_read(struct file *file, loff_t offset,
59151 old_fs = get_fs();
59152 set_fs(get_ds());
59153 /* The cast to a user pointer is valid due to the set_fs() */
59154- result = vfs_read(file, (void __user *)addr, count, &pos);
59155+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
59156 set_fs(old_fs);
59157 return result;
59158 }
59159@@ -846,6 +924,7 @@ static int exec_mmap(struct mm_struct *mm)
59160 tsk->mm = mm;
59161 tsk->active_mm = mm;
59162 activate_mm(active_mm, mm);
59163+ populate_stack();
59164 task_unlock(tsk);
59165 if (old_mm) {
59166 up_read(&old_mm->mmap_sem);
59167@@ -1258,7 +1337,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
59168 }
59169 rcu_read_unlock();
59170
59171- if (p->fs->users > n_fs)
59172+ if (atomic_read(&p->fs->users) > n_fs)
59173 bprm->unsafe |= LSM_UNSAFE_SHARE;
59174 else
59175 p->fs->in_exec = 1;
59176@@ -1434,6 +1513,31 @@ static int exec_binprm(struct linux_binprm *bprm)
59177 return ret;
59178 }
59179
59180+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59181+static DEFINE_PER_CPU(u64, exec_counter);
59182+static int __init init_exec_counters(void)
59183+{
59184+ unsigned int cpu;
59185+
59186+ for_each_possible_cpu(cpu) {
59187+ per_cpu(exec_counter, cpu) = (u64)cpu;
59188+ }
59189+
59190+ return 0;
59191+}
59192+early_initcall(init_exec_counters);
59193+static inline void increment_exec_counter(void)
59194+{
59195+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
59196+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
59197+}
59198+#else
59199+static inline void increment_exec_counter(void) {}
59200+#endif
59201+
59202+extern void gr_handle_exec_args(struct linux_binprm *bprm,
59203+ struct user_arg_ptr argv);
59204+
59205 /*
59206 * sys_execve() executes a new program.
59207 */
59208@@ -1441,6 +1545,11 @@ static int do_execve_common(struct filename *filename,
59209 struct user_arg_ptr argv,
59210 struct user_arg_ptr envp)
59211 {
59212+#ifdef CONFIG_GRKERNSEC
59213+ struct file *old_exec_file;
59214+ struct acl_subject_label *old_acl;
59215+ struct rlimit old_rlim[RLIM_NLIMITS];
59216+#endif
59217 struct linux_binprm *bprm;
59218 struct file *file;
59219 struct files_struct *displaced;
59220@@ -1449,6 +1558,8 @@ static int do_execve_common(struct filename *filename,
59221 if (IS_ERR(filename))
59222 return PTR_ERR(filename);
59223
59224+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
59225+
59226 /*
59227 * We move the actual failure in case of RLIMIT_NPROC excess from
59228 * set*uid() to execve() because too many poorly written programs
59229@@ -1486,11 +1597,21 @@ static int do_execve_common(struct filename *filename,
59230 if (IS_ERR(file))
59231 goto out_unmark;
59232
59233+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
59234+ retval = -EPERM;
59235+ goto out_unmark;
59236+ }
59237+
59238 sched_exec();
59239
59240 bprm->file = file;
59241 bprm->filename = bprm->interp = filename->name;
59242
59243+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
59244+ retval = -EACCES;
59245+ goto out_unmark;
59246+ }
59247+
59248 retval = bprm_mm_init(bprm);
59249 if (retval)
59250 goto out_unmark;
59251@@ -1507,24 +1628,70 @@ static int do_execve_common(struct filename *filename,
59252 if (retval < 0)
59253 goto out;
59254
59255+#ifdef CONFIG_GRKERNSEC
59256+ old_acl = current->acl;
59257+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
59258+ old_exec_file = current->exec_file;
59259+ get_file(file);
59260+ current->exec_file = file;
59261+#endif
59262+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59263+ /* limit suid stack to 8MB
59264+ * we saved the old limits above and will restore them if this exec fails
59265+ */
59266+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
59267+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
59268+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
59269+#endif
59270+
59271+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
59272+ retval = -EPERM;
59273+ goto out_fail;
59274+ }
59275+
59276+ if (!gr_tpe_allow(file)) {
59277+ retval = -EACCES;
59278+ goto out_fail;
59279+ }
59280+
59281+ if (gr_check_crash_exec(file)) {
59282+ retval = -EACCES;
59283+ goto out_fail;
59284+ }
59285+
59286+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
59287+ bprm->unsafe);
59288+ if (retval < 0)
59289+ goto out_fail;
59290+
59291 retval = copy_strings_kernel(1, &bprm->filename, bprm);
59292 if (retval < 0)
59293- goto out;
59294+ goto out_fail;
59295
59296 bprm->exec = bprm->p;
59297 retval = copy_strings(bprm->envc, envp, bprm);
59298 if (retval < 0)
59299- goto out;
59300+ goto out_fail;
59301
59302 retval = copy_strings(bprm->argc, argv, bprm);
59303 if (retval < 0)
59304- goto out;
59305+ goto out_fail;
59306+
59307+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
59308+
59309+ gr_handle_exec_args(bprm, argv);
59310
59311 retval = exec_binprm(bprm);
59312 if (retval < 0)
59313- goto out;
59314+ goto out_fail;
59315+#ifdef CONFIG_GRKERNSEC
59316+ if (old_exec_file)
59317+ fput(old_exec_file);
59318+#endif
59319
59320 /* execve succeeded */
59321+
59322+ increment_exec_counter();
59323 current->fs->in_exec = 0;
59324 current->in_execve = 0;
59325 acct_update_integrals(current);
59326@@ -1535,6 +1702,14 @@ static int do_execve_common(struct filename *filename,
59327 put_files_struct(displaced);
59328 return retval;
59329
59330+out_fail:
59331+#ifdef CONFIG_GRKERNSEC
59332+ current->acl = old_acl;
59333+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
59334+ fput(current->exec_file);
59335+ current->exec_file = old_exec_file;
59336+#endif
59337+
59338 out:
59339 if (bprm->mm) {
59340 acct_arg_size(bprm, 0);
59341@@ -1626,3 +1801,312 @@ asmlinkage long compat_sys_execve(const char __user * filename,
59342 return compat_do_execve(getname(filename), argv, envp);
59343 }
59344 #endif
59345+
59346+int pax_check_flags(unsigned long *flags)
59347+{
59348+ int retval = 0;
59349+
59350+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
59351+ if (*flags & MF_PAX_SEGMEXEC)
59352+ {
59353+ *flags &= ~MF_PAX_SEGMEXEC;
59354+ retval = -EINVAL;
59355+ }
59356+#endif
59357+
59358+ if ((*flags & MF_PAX_PAGEEXEC)
59359+
59360+#ifdef CONFIG_PAX_PAGEEXEC
59361+ && (*flags & MF_PAX_SEGMEXEC)
59362+#endif
59363+
59364+ )
59365+ {
59366+ *flags &= ~MF_PAX_PAGEEXEC;
59367+ retval = -EINVAL;
59368+ }
59369+
59370+ if ((*flags & MF_PAX_MPROTECT)
59371+
59372+#ifdef CONFIG_PAX_MPROTECT
59373+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
59374+#endif
59375+
59376+ )
59377+ {
59378+ *flags &= ~MF_PAX_MPROTECT;
59379+ retval = -EINVAL;
59380+ }
59381+
59382+ if ((*flags & MF_PAX_EMUTRAMP)
59383+
59384+#ifdef CONFIG_PAX_EMUTRAMP
59385+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
59386+#endif
59387+
59388+ )
59389+ {
59390+ *flags &= ~MF_PAX_EMUTRAMP;
59391+ retval = -EINVAL;
59392+ }
59393+
59394+ return retval;
59395+}
59396+
59397+EXPORT_SYMBOL(pax_check_flags);
59398+
59399+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59400+char *pax_get_path(const struct path *path, char *buf, int buflen)
59401+{
59402+ char *pathname = d_path(path, buf, buflen);
59403+
59404+ if (IS_ERR(pathname))
59405+ goto toolong;
59406+
59407+ pathname = mangle_path(buf, pathname, "\t\n\\");
59408+ if (!pathname)
59409+ goto toolong;
59410+
59411+ *pathname = 0;
59412+ return buf;
59413+
59414+toolong:
59415+ return "<path too long>";
59416+}
59417+EXPORT_SYMBOL(pax_get_path);
59418+
59419+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
59420+{
59421+ struct task_struct *tsk = current;
59422+ struct mm_struct *mm = current->mm;
59423+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
59424+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
59425+ char *path_exec = NULL;
59426+ char *path_fault = NULL;
59427+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
59428+ siginfo_t info = { };
59429+
59430+ if (buffer_exec && buffer_fault) {
59431+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
59432+
59433+ down_read(&mm->mmap_sem);
59434+ vma = mm->mmap;
59435+ while (vma && (!vma_exec || !vma_fault)) {
59436+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
59437+ vma_exec = vma;
59438+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
59439+ vma_fault = vma;
59440+ vma = vma->vm_next;
59441+ }
59442+ if (vma_exec)
59443+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
59444+ if (vma_fault) {
59445+ start = vma_fault->vm_start;
59446+ end = vma_fault->vm_end;
59447+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
59448+ if (vma_fault->vm_file)
59449+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
59450+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
59451+ path_fault = "<heap>";
59452+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
59453+ path_fault = "<stack>";
59454+ else
59455+ path_fault = "<anonymous mapping>";
59456+ }
59457+ up_read(&mm->mmap_sem);
59458+ }
59459+ if (tsk->signal->curr_ip)
59460+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
59461+ else
59462+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
59463+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
59464+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
59465+ free_page((unsigned long)buffer_exec);
59466+ free_page((unsigned long)buffer_fault);
59467+ pax_report_insns(regs, pc, sp);
59468+ info.si_signo = SIGKILL;
59469+ info.si_errno = 0;
59470+ info.si_code = SI_KERNEL;
59471+ info.si_pid = 0;
59472+ info.si_uid = 0;
59473+ do_coredump(&info);
59474+}
59475+#endif
59476+
59477+#ifdef CONFIG_PAX_REFCOUNT
59478+void pax_report_refcount_overflow(struct pt_regs *regs)
59479+{
59480+ if (current->signal->curr_ip)
59481+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
59482+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
59483+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
59484+ else
59485+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
59486+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
59487+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
59488+ preempt_disable();
59489+ show_regs(regs);
59490+ preempt_enable();
59491+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
59492+}
59493+#endif
59494+
59495+#ifdef CONFIG_PAX_USERCOPY
59496+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
59497+static noinline int check_stack_object(const void *obj, unsigned long len)
59498+{
59499+ const void * const stack = task_stack_page(current);
59500+ const void * const stackend = stack + THREAD_SIZE;
59501+
59502+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
59503+ const void *frame = NULL;
59504+ const void *oldframe;
59505+#endif
59506+
59507+ if (obj + len < obj)
59508+ return -1;
59509+
59510+ if (obj + len <= stack || stackend <= obj)
59511+ return 0;
59512+
59513+ if (obj < stack || stackend < obj + len)
59514+ return -1;
59515+
59516+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
59517+ oldframe = __builtin_frame_address(1);
59518+ if (oldframe)
59519+ frame = __builtin_frame_address(2);
59520+ /*
59521+ low ----------------------------------------------> high
59522+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
59523+ ^----------------^
59524+ allow copies only within here
59525+ */
59526+ while (stack <= frame && frame < stackend) {
59527+ /* if obj + len extends past the last frame, this
59528+ check won't pass and the next frame will be 0,
59529+ causing us to bail out and correctly report
59530+ the copy as invalid
59531+ */
59532+ if (obj + len <= frame)
59533+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
59534+ oldframe = frame;
59535+ frame = *(const void * const *)frame;
59536+ }
59537+ return -1;
59538+#else
59539+ return 1;
59540+#endif
59541+}
59542+
59543+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
59544+{
59545+ if (current->signal->curr_ip)
59546+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
59547+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
59548+ else
59549+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
59550+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
59551+ dump_stack();
59552+ gr_handle_kernel_exploit();
59553+ do_group_exit(SIGKILL);
59554+}
59555+#endif
59556+
59557+#ifdef CONFIG_PAX_USERCOPY
59558+
59559+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
59560+{
59561+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59562+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
59563+#ifdef CONFIG_MODULES
59564+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
59565+#else
59566+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
59567+#endif
59568+
59569+#else
59570+ unsigned long textlow = (unsigned long)_stext;
59571+ unsigned long texthigh = (unsigned long)_etext;
59572+
59573+#ifdef CONFIG_X86_64
59574+ /* check against linear mapping as well */
59575+ if (high > (unsigned long)__va(__pa(textlow)) &&
59576+ low < (unsigned long)__va(__pa(texthigh)))
59577+ return true;
59578+#endif
59579+
59580+#endif
59581+
59582+ if (high <= textlow || low >= texthigh)
59583+ return false;
59584+ else
59585+ return true;
59586+}
59587+#endif
59588+
59589+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
59590+{
59591+#ifdef CONFIG_PAX_USERCOPY
59592+ const char *type;
59593+#endif
59594+
59595+#ifndef CONFIG_STACK_GROWSUP
59596+ unsigned long stackstart = (unsigned long)task_stack_page(current);
59597+ unsigned long currentsp = (unsigned long)&stackstart;
59598+ if (unlikely(currentsp < stackstart + 512 ||
59599+ currentsp >= stackstart + THREAD_SIZE))
59600+ BUG();
59601+#endif
59602+
59603+#ifndef CONFIG_PAX_USERCOPY_DEBUG
59604+ if (const_size)
59605+ return;
59606+#endif
59607+
59608+#ifdef CONFIG_PAX_USERCOPY
59609+ if (!n)
59610+ return;
59611+
59612+ type = check_heap_object(ptr, n);
59613+ if (!type) {
59614+ int ret = check_stack_object(ptr, n);
59615+ if (ret == 1 || ret == 2)
59616+ return;
59617+ if (ret == 0) {
59618+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
59619+ type = "<kernel text>";
59620+ else
59621+ return;
59622+ } else
59623+ type = "<process stack>";
59624+ }
59625+
59626+ pax_report_usercopy(ptr, n, to_user, type);
59627+#endif
59628+
59629+}
59630+EXPORT_SYMBOL(__check_object_size);
59631+
59632+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
59633+void pax_track_stack(void)
59634+{
59635+ unsigned long sp = (unsigned long)&sp;
59636+ if (sp < current_thread_info()->lowest_stack &&
59637+ sp > (unsigned long)task_stack_page(current))
59638+ current_thread_info()->lowest_stack = sp;
59639+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
59640+ BUG();
59641+}
59642+EXPORT_SYMBOL(pax_track_stack);
59643+#endif
59644+
59645+#ifdef CONFIG_PAX_SIZE_OVERFLOW
59646+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
59647+{
59648+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
59649+ dump_stack();
59650+ do_group_exit(SIGKILL);
59651+}
59652+EXPORT_SYMBOL(report_size_overflow);
59653+#endif
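
[annotation, not part of the patch] check_stack_object() in the fs/exec.c additions above first classifies a candidate usercopy range against the current kernel stack, and only then (on x86 with frame pointers) walks saved frame pointers to pin the object inside one frame. A simplified userspace-only sketch of the range-check half; the __builtin_frame_address walk is omitted, and the 64 KiB "stack" window is a made-up stand-in for THREAD_SIZE.

#include <stdint.h>
#include <stdio.h>

/* 0: not on this stack, 1: on stack, -1: wraps or straddles (reject) */
static int on_stack(uintptr_t stack, uintptr_t stackend,
		    uintptr_t obj, unsigned long len)
{
	if (obj + len < obj)
		return -1; /* address wraparound */
	if (obj + len <= stack || stackend <= obj)
		return 0;  /* entirely elsewhere */
	if (obj < stack || stackend < obj + len)
		return -1; /* partially overlaps the stack: always bad */
	return 1;
}

int main(void)
{
	char local[32];
	uintptr_t sp = (uintptr_t)&local[0];
	/* approximate a 64 KiB, 64 KiB-aligned stack around a local */
	uintptr_t stack = sp & ~(uintptr_t)0xffff;
	uintptr_t stackend = stack + 0x10000;

	printf("local buffer: %d\n", on_stack(stack, stackend, sp, sizeof(local)));
	printf("foreign ptr:  %d\n", on_stack(stack, stackend, 0x1000, 16));
	printf("straddling:   %d\n", on_stack(stack, stackend, stackend - 8, 64));
	return 0;
}
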
59654diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
59655index 9f9992b..8b59411 100644
59656--- a/fs/ext2/balloc.c
59657+++ b/fs/ext2/balloc.c
59658@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
59659
59660 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
59661 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
59662- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
59663+ if (free_blocks < root_blocks + 1 &&
59664 !uid_eq(sbi->s_resuid, current_fsuid()) &&
59665 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
59666- !in_group_p (sbi->s_resgid))) {
59667+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
59668 return 0;
59669 }
59670 return 1;
59671diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
59672index 9142614..97484fa 100644
59673--- a/fs/ext2/xattr.c
59674+++ b/fs/ext2/xattr.c
59675@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
59676 struct buffer_head *bh = NULL;
59677 struct ext2_xattr_entry *entry;
59678 char *end;
59679- size_t rest = buffer_size;
59680+ size_t rest = buffer_size, total_size = 0;
59681 int error;
59682
59683 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
59684@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
59685 buffer += size;
59686 }
59687 rest -= size;
59688+ total_size += size;
59689 }
59690 }
59691- error = buffer_size - rest; /* total size */
59692+ error = total_size;
59693
59694 cleanup:
59695 brelse(bh);
59696diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
59697index 22548f5..41521d8 100644
59698--- a/fs/ext3/balloc.c
59699+++ b/fs/ext3/balloc.c
59700@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
59701
59702 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
59703 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
59704- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
59705+ if (free_blocks < root_blocks + 1 &&
59706 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
59707 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
59708- !in_group_p (sbi->s_resgid))) {
59709+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
59710 return 0;
59711 }
59712 return 1;
59713diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
59714index c6874be..f8a6ae8 100644
59715--- a/fs/ext3/xattr.c
59716+++ b/fs/ext3/xattr.c
59717@@ -330,7 +330,7 @@ static int
59718 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
59719 char *buffer, size_t buffer_size)
59720 {
59721- size_t rest = buffer_size;
59722+ size_t rest = buffer_size, total_size = 0;
59723
59724 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
59725 const struct xattr_handler *handler =
59726@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
59727 buffer += size;
59728 }
59729 rest -= size;
59730+ total_size += size;
59731 }
59732 }
59733- return buffer_size - rest;
59734+ return total_size;
59735 }
59736
59737 static int
59738diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
59739index 6ea7b14..8fa16d9 100644
59740--- a/fs/ext4/balloc.c
59741+++ b/fs/ext4/balloc.c
59742@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
59743 /* Hm, nope. Are (enough) root reserved clusters available? */
59744 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
59745 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
59746- capable(CAP_SYS_RESOURCE) ||
59747- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
59748+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
59749+ capable_nolog(CAP_SYS_RESOURCE)) {
59750
59751 if (free_clusters >= (nclusters + dirty_clusters +
59752 resv_clusters))
59753diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
59754index 3a603a8..9b868ba 100644
59755--- a/fs/ext4/ext4.h
59756+++ b/fs/ext4/ext4.h
59757@@ -1269,19 +1269,19 @@ struct ext4_sb_info {
59758 unsigned long s_mb_last_start;
59759
59760 /* stats for buddy allocator */
59761- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
59762- atomic_t s_bal_success; /* we found long enough chunks */
59763- atomic_t s_bal_allocated; /* in blocks */
59764- atomic_t s_bal_ex_scanned; /* total extents scanned */
59765- atomic_t s_bal_goals; /* goal hits */
59766- atomic_t s_bal_breaks; /* too long searches */
59767- atomic_t s_bal_2orders; /* 2^order hits */
59768+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
59769+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
59770+ atomic_unchecked_t s_bal_allocated; /* in blocks */
59771+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
59772+ atomic_unchecked_t s_bal_goals; /* goal hits */
59773+ atomic_unchecked_t s_bal_breaks; /* too long searches */
59774+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
59775 spinlock_t s_bal_lock;
59776 unsigned long s_mb_buddies_generated;
59777 unsigned long long s_mb_generation_time;
59778- atomic_t s_mb_lost_chunks;
59779- atomic_t s_mb_preallocated;
59780- atomic_t s_mb_discarded;
59781+ atomic_unchecked_t s_mb_lost_chunks;
59782+ atomic_unchecked_t s_mb_preallocated;
59783+ atomic_unchecked_t s_mb_discarded;
59784 atomic_t s_lock_busy;
59785
59786 /* locality groups */
59787diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
59788index 04a5c75..09894fa 100644
59789--- a/fs/ext4/mballoc.c
59790+++ b/fs/ext4/mballoc.c
59791@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
59792 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
59793
59794 if (EXT4_SB(sb)->s_mb_stats)
59795- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
59796+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
59797
59798 break;
59799 }
59800@@ -2189,7 +2189,7 @@ repeat:
59801 ac->ac_status = AC_STATUS_CONTINUE;
59802 ac->ac_flags |= EXT4_MB_HINT_FIRST;
59803 cr = 3;
59804- atomic_inc(&sbi->s_mb_lost_chunks);
59805+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
59806 goto repeat;
59807 }
59808 }
59809@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
59810 if (sbi->s_mb_stats) {
59811 ext4_msg(sb, KERN_INFO,
59812 "mballoc: %u blocks %u reqs (%u success)",
59813- atomic_read(&sbi->s_bal_allocated),
59814- atomic_read(&sbi->s_bal_reqs),
59815- atomic_read(&sbi->s_bal_success));
59816+ atomic_read_unchecked(&sbi->s_bal_allocated),
59817+ atomic_read_unchecked(&sbi->s_bal_reqs),
59818+ atomic_read_unchecked(&sbi->s_bal_success));
59819 ext4_msg(sb, KERN_INFO,
59820 "mballoc: %u extents scanned, %u goal hits, "
59821 "%u 2^N hits, %u breaks, %u lost",
59822- atomic_read(&sbi->s_bal_ex_scanned),
59823- atomic_read(&sbi->s_bal_goals),
59824- atomic_read(&sbi->s_bal_2orders),
59825- atomic_read(&sbi->s_bal_breaks),
59826- atomic_read(&sbi->s_mb_lost_chunks));
59827+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
59828+ atomic_read_unchecked(&sbi->s_bal_goals),
59829+ atomic_read_unchecked(&sbi->s_bal_2orders),
59830+ atomic_read_unchecked(&sbi->s_bal_breaks),
59831+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
59832 ext4_msg(sb, KERN_INFO,
59833 "mballoc: %lu generated and it took %Lu",
59834 sbi->s_mb_buddies_generated,
59835 sbi->s_mb_generation_time);
59836 ext4_msg(sb, KERN_INFO,
59837 "mballoc: %u preallocated, %u discarded",
59838- atomic_read(&sbi->s_mb_preallocated),
59839- atomic_read(&sbi->s_mb_discarded));
59840+ atomic_read_unchecked(&sbi->s_mb_preallocated),
59841+ atomic_read_unchecked(&sbi->s_mb_discarded));
59842 }
59843
59844 free_percpu(sbi->s_locality_groups);
59845@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
59846 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
59847
59848 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
59849- atomic_inc(&sbi->s_bal_reqs);
59850- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59851+ atomic_inc_unchecked(&sbi->s_bal_reqs);
59852+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59853 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
59854- atomic_inc(&sbi->s_bal_success);
59855- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
59856+ atomic_inc_unchecked(&sbi->s_bal_success);
59857+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
59858 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
59859 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
59860- atomic_inc(&sbi->s_bal_goals);
59861+ atomic_inc_unchecked(&sbi->s_bal_goals);
59862 if (ac->ac_found > sbi->s_mb_max_to_scan)
59863- atomic_inc(&sbi->s_bal_breaks);
59864+ atomic_inc_unchecked(&sbi->s_bal_breaks);
59865 }
59866
59867 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
59868@@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
59869 trace_ext4_mb_new_inode_pa(ac, pa);
59870
59871 ext4_mb_use_inode_pa(ac, pa);
59872- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
59873+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
59874
59875 ei = EXT4_I(ac->ac_inode);
59876 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59877@@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
59878 trace_ext4_mb_new_group_pa(ac, pa);
59879
59880 ext4_mb_use_group_pa(ac, pa);
59881- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59882+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59883
59884 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59885 lg = ac->ac_lg;
59886@@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
59887 * from the bitmap and continue.
59888 */
59889 }
59890- atomic_add(free, &sbi->s_mb_discarded);
59891+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
59892
59893 return err;
59894 }
59895@@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
59896 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
59897 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
59898 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
59899- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59900+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59901 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
59902
59903 return 0;
59904diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
59905index 04434ad..6404663 100644
59906--- a/fs/ext4/mmp.c
59907+++ b/fs/ext4/mmp.c
59908@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
59909 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
59910 const char *function, unsigned int line, const char *msg)
59911 {
59912- __ext4_warning(sb, function, line, msg);
59913+ __ext4_warning(sb, function, line, "%s", msg);
59914 __ext4_warning(sb, function, line,
59915 "MMP failure info: last update time: %llu, last update "
59916 "node: %s, last update device: %s\n",
59917diff --git a/fs/ext4/super.c b/fs/ext4/super.c
59918index 710fed2..a82e4e8 100644
59919--- a/fs/ext4/super.c
59920+++ b/fs/ext4/super.c
59921@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
59922 }
59923
59924 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
59925-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59926+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59927 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
59928
59929 #ifdef CONFIG_QUOTA
59930@@ -2450,7 +2450,7 @@ struct ext4_attr {
59931 int offset;
59932 int deprecated_val;
59933 } u;
59934-};
59935+} __do_const;
59936
59937 static int parse_strtoull(const char *buf,
59938 unsigned long long max, unsigned long long *value)
59939diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
59940index 55e611c..cfad16d 100644
59941--- a/fs/ext4/xattr.c
59942+++ b/fs/ext4/xattr.c
59943@@ -381,7 +381,7 @@ static int
59944 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59945 char *buffer, size_t buffer_size)
59946 {
59947- size_t rest = buffer_size;
59948+ size_t rest = buffer_size, total_size = 0;
59949
59950 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
59951 const struct xattr_handler *handler =
59952@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59953 buffer += size;
59954 }
59955 rest -= size;
59956+ total_size += size;
59957 }
59958 }
59959- return buffer_size - rest;
59960+ return total_size;
59961 }
59962
59963 static int
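
[annotation, not part of the patch] The ext2/ext3/ext4 xattr hunks above all make the same change: the list helpers return an explicitly accumulated total_size instead of buffer_size - rest. In probe mode (buffer == NULL, buffer_size == 0) the old code still decremented rest, so the correct result was recovered only through unsigned wraparound of an underflowed intermediate, exactly the kind of arithmetic the PAX_SIZE_OVERFLOW instrumentation elsewhere in this patch flags. A small hypothetical standalone program showing that both expressions agree in value while only the new one keeps every intermediate in range.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t sizes[] = { 10, 20, 30 }; /* pretend xattr entry sizes */
	size_t buffer_size = 0;          /* probe mode: caller passed no buffer */
	size_t rest = buffer_size, total_size = 0;

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		rest -= sizes[i];        /* dips below zero and wraps */
		total_size += sizes[i];
	}
	printf("rest wrapped to:  %zu\n", rest);               /* SIZE_MAX - 59 */
	printf("old return value: %zu\n", buffer_size - rest); /* 60, via wraparound */
	printf("new return value: %zu\n", total_size);         /* 60, no wraparound */
	return 0;
}
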
59964diff --git a/fs/fcntl.c b/fs/fcntl.c
59965index ef68665..5deacdc 100644
59966--- a/fs/fcntl.c
59967+++ b/fs/fcntl.c
59968@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
59969 if (err)
59970 return err;
59971
59972+ if (gr_handle_chroot_fowner(pid, type))
59973+ return -ENOENT;
59974+ if (gr_check_protected_task_fowner(pid, type))
59975+ return -EACCES;
59976+
59977 f_modown(filp, pid, type, force);
59978 return 0;
59979 }
59980diff --git a/fs/fhandle.c b/fs/fhandle.c
59981index 999ff5c..41f4109 100644
59982--- a/fs/fhandle.c
59983+++ b/fs/fhandle.c
59984@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
59985 } else
59986 retval = 0;
59987 /* copy the mount id */
59988- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
59989- sizeof(*mnt_id)) ||
59990+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
59991 copy_to_user(ufh, handle,
59992 sizeof(struct file_handle) + handle_bytes))
59993 retval = -EFAULT;
59994diff --git a/fs/file.c b/fs/file.c
59995index eb56a13..ccee850 100644
59996--- a/fs/file.c
59997+++ b/fs/file.c
59998@@ -16,6 +16,7 @@
59999 #include <linux/slab.h>
60000 #include <linux/vmalloc.h>
60001 #include <linux/file.h>
60002+#include <linux/security.h>
60003 #include <linux/fdtable.h>
60004 #include <linux/bitops.h>
60005 #include <linux/interrupt.h>
60006@@ -141,7 +142,7 @@ out:
60007 * Return <0 error code on error; 1 on successful completion.
60008 * The files->file_lock should be held on entry, and will be held on exit.
60009 */
60010-static int expand_fdtable(struct files_struct *files, int nr)
60011+static int expand_fdtable(struct files_struct *files, unsigned int nr)
60012 __releases(files->file_lock)
60013 __acquires(files->file_lock)
60014 {
60015@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
60016 * expanded and execution may have blocked.
60017 * The files->file_lock should be held on entry, and will be held on exit.
60018 */
60019-static int expand_files(struct files_struct *files, int nr)
60020+static int expand_files(struct files_struct *files, unsigned int nr)
60021 {
60022 struct fdtable *fdt;
60023
60024@@ -807,6 +808,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
60025 if (!file)
60026 return __close_fd(files, fd);
60027
60028+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
60029 if (fd >= rlimit(RLIMIT_NOFILE))
60030 return -EBADF;
60031
60032@@ -833,6 +835,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
60033 if (unlikely(oldfd == newfd))
60034 return -EINVAL;
60035
60036+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
60037 if (newfd >= rlimit(RLIMIT_NOFILE))
60038 return -EBADF;
60039
60040@@ -888,6 +891,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
60041 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
60042 {
60043 int err;
60044+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
60045 if (from >= rlimit(RLIMIT_NOFILE))
60046 return -EINVAL;
60047 err = alloc_fd(from, flags);
60048diff --git a/fs/filesystems.c b/fs/filesystems.c
60049index 92567d9..fcd8cbf 100644
60050--- a/fs/filesystems.c
60051+++ b/fs/filesystems.c
60052@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
60053 int len = dot ? dot - name : strlen(name);
60054
60055 fs = __get_fs_type(name, len);
60056+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60057+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
60058+#else
60059 if (!fs && (request_module("fs-%.*s", len, name) == 0))
60060+#endif
60061 fs = __get_fs_type(name, len);
60062
60063 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
60064diff --git a/fs/fs_struct.c b/fs/fs_struct.c
60065index 7dca743..543d620 100644
60066--- a/fs/fs_struct.c
60067+++ b/fs/fs_struct.c
60068@@ -4,6 +4,7 @@
60069 #include <linux/path.h>
60070 #include <linux/slab.h>
60071 #include <linux/fs_struct.h>
60072+#include <linux/grsecurity.h>
60073 #include "internal.h"
60074
60075 /*
60076@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
60077 write_seqcount_begin(&fs->seq);
60078 old_root = fs->root;
60079 fs->root = *path;
60080+ gr_set_chroot_entries(current, path);
60081 write_seqcount_end(&fs->seq);
60082 spin_unlock(&fs->lock);
60083 if (old_root.dentry)
60084@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
60085 int hits = 0;
60086 spin_lock(&fs->lock);
60087 write_seqcount_begin(&fs->seq);
60088+ /* this root replacement is only done by pivot_root,
60089+ leave grsec's chroot tagging alone for this task
60090+ so that a pivoted root isn't treated as a chroot
60091+ */
60092 hits += replace_path(&fs->root, old_root, new_root);
60093 hits += replace_path(&fs->pwd, old_root, new_root);
60094 write_seqcount_end(&fs->seq);
60095@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
60096 task_lock(tsk);
60097 spin_lock(&fs->lock);
60098 tsk->fs = NULL;
60099- kill = !--fs->users;
60100+ gr_clear_chroot_entries(tsk);
60101+ kill = !atomic_dec_return(&fs->users);
60102 spin_unlock(&fs->lock);
60103 task_unlock(tsk);
60104 if (kill)
60105@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
60106 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
60107 /* We don't need to lock fs - think why ;-) */
60108 if (fs) {
60109- fs->users = 1;
60110+ atomic_set(&fs->users, 1);
60111 fs->in_exec = 0;
60112 spin_lock_init(&fs->lock);
60113 seqcount_init(&fs->seq);
60114@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
60115 spin_lock(&old->lock);
60116 fs->root = old->root;
60117 path_get(&fs->root);
60118+ /* Instead of calling gr_set_chroot_entries() here,
60119+ we call it from every caller of this function.
60120+ */
60121 fs->pwd = old->pwd;
60122 path_get(&fs->pwd);
60123 spin_unlock(&old->lock);
60124@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
60125
60126 task_lock(current);
60127 spin_lock(&fs->lock);
60128- kill = !--fs->users;
60129+ kill = !atomic_dec_return(&fs->users);
60130 current->fs = new_fs;
60131+ gr_set_chroot_entries(current, &new_fs->root);
60132 spin_unlock(&fs->lock);
60133 task_unlock(current);
60134
60135@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
60136
60137 int current_umask(void)
60138 {
60139- return current->fs->umask;
60140+ return current->fs->umask | gr_acl_umask();
60141 }
60142 EXPORT_SYMBOL(current_umask);
60143
60144 /* to be mentioned only in INIT_TASK */
60145 struct fs_struct init_fs = {
60146- .users = 1,
60147+ .users = ATOMIC_INIT(1),
60148 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
60149 .seq = SEQCNT_ZERO(init_fs.seq),
60150 .umask = 0022,
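
Two independent changes run through the fs/fs_struct.c diff above. First, the plain "int users" reference count becomes an atomic_t, so "kill = !--fs->users" turns into "kill = !atomic_dec_return(&fs->users)": both mean "this put dropped the last reference", but the atomic form does not rely on fs->lock for the decrement itself. Second, current_umask() ORs in gr_acl_umask(), so RBAC policy can force permission bits off without ever granting bits the task's own umask would mask. A userspace sketch of both idioms, using GCC/Clang atomic builtins in place of the kernel's atomic_t helpers:

#include <stdbool.h>

struct refcounted {
	int users;	/* touched only via the helpers below */
};

static void get_ref(struct refcounted *r)
{
	__atomic_fetch_add(&r->users, 1, __ATOMIC_RELAXED);
}

/* analogue of "kill = !atomic_dec_return(&fs->users)": returns true
 * for the caller that dropped the final reference */
static bool put_ref(struct refcounted *r)
{
	return __atomic_sub_fetch(&r->users, 1, __ATOMIC_ACQ_REL) == 0;
}

/* analogue of current->fs->umask | gr_acl_umask(): policy can only
 * clear permission bits, never add them */
static unsigned int effective_umask(unsigned int task_umask,
				    unsigned int policy_umask)
{
	return task_umask | policy_umask;
}

int main(void)
{
	struct refcounted r = { 1 };
	get_ref(&r);
	put_ref(&r);
	return put_ref(&r) ? 0 : 1;	/* the final put returns true */
}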
60151diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
60152index 29d7feb..303644d 100644
60153--- a/fs/fscache/cookie.c
60154+++ b/fs/fscache/cookie.c
60155@@ -19,7 +19,7 @@
60156
60157 struct kmem_cache *fscache_cookie_jar;
60158
60159-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
60160+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
60161
60162 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
60163 static int fscache_alloc_object(struct fscache_cache *cache,
60164@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
60165 parent ? (char *) parent->def->name : "<no-parent>",
60166 def->name, netfs_data, enable);
60167
60168- fscache_stat(&fscache_n_acquires);
60169+ fscache_stat_unchecked(&fscache_n_acquires);
60170
60171 /* if there's no parent cookie, then we don't create one here either */
60172 if (!parent) {
60173- fscache_stat(&fscache_n_acquires_null);
60174+ fscache_stat_unchecked(&fscache_n_acquires_null);
60175 _leave(" [no parent]");
60176 return NULL;
60177 }
60178@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
60179 /* allocate and initialise a cookie */
60180 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
60181 if (!cookie) {
60182- fscache_stat(&fscache_n_acquires_oom);
60183+ fscache_stat_unchecked(&fscache_n_acquires_oom);
60184 _leave(" [ENOMEM]");
60185 return NULL;
60186 }
60187@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
60188
60189 switch (cookie->def->type) {
60190 case FSCACHE_COOKIE_TYPE_INDEX:
60191- fscache_stat(&fscache_n_cookie_index);
60192+ fscache_stat_unchecked(&fscache_n_cookie_index);
60193 break;
60194 case FSCACHE_COOKIE_TYPE_DATAFILE:
60195- fscache_stat(&fscache_n_cookie_data);
60196+ fscache_stat_unchecked(&fscache_n_cookie_data);
60197 break;
60198 default:
60199- fscache_stat(&fscache_n_cookie_special);
60200+ fscache_stat_unchecked(&fscache_n_cookie_special);
60201 break;
60202 }
60203
60204@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
60205 } else {
60206 atomic_dec(&parent->n_children);
60207 __fscache_cookie_put(cookie);
60208- fscache_stat(&fscache_n_acquires_nobufs);
60209+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
60210 _leave(" = NULL");
60211 return NULL;
60212 }
60213@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
60214 }
60215 }
60216
60217- fscache_stat(&fscache_n_acquires_ok);
60218+ fscache_stat_unchecked(&fscache_n_acquires_ok);
60219 _leave(" = %p", cookie);
60220 return cookie;
60221 }
60222@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
60223 cache = fscache_select_cache_for_object(cookie->parent);
60224 if (!cache) {
60225 up_read(&fscache_addremove_sem);
60226- fscache_stat(&fscache_n_acquires_no_cache);
60227+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
60228 _leave(" = -ENOMEDIUM [no cache]");
60229 return -ENOMEDIUM;
60230 }
60231@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
60232 object = cache->ops->alloc_object(cache, cookie);
60233 fscache_stat_d(&fscache_n_cop_alloc_object);
60234 if (IS_ERR(object)) {
60235- fscache_stat(&fscache_n_object_no_alloc);
60236+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
60237 ret = PTR_ERR(object);
60238 goto error;
60239 }
60240
60241- fscache_stat(&fscache_n_object_alloc);
60242+ fscache_stat_unchecked(&fscache_n_object_alloc);
60243
60244- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
60245+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
60246
60247 _debug("ALLOC OBJ%x: %s {%lx}",
60248 object->debug_id, cookie->def->name, object->events);
60249@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
60250
60251 _enter("{%s}", cookie->def->name);
60252
60253- fscache_stat(&fscache_n_invalidates);
60254+ fscache_stat_unchecked(&fscache_n_invalidates);
60255
60256 /* Only permit invalidation of data files. Invalidating an index will
60257 * require the caller to release all its attachments to the tree rooted
60258@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
60259 {
60260 struct fscache_object *object;
60261
60262- fscache_stat(&fscache_n_updates);
60263+ fscache_stat_unchecked(&fscache_n_updates);
60264
60265 if (!cookie) {
60266- fscache_stat(&fscache_n_updates_null);
60267+ fscache_stat_unchecked(&fscache_n_updates_null);
60268 _leave(" [no cookie]");
60269 return;
60270 }
60271@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
60272 */
60273 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
60274 {
60275- fscache_stat(&fscache_n_relinquishes);
60276+ fscache_stat_unchecked(&fscache_n_relinquishes);
60277 if (retire)
60278- fscache_stat(&fscache_n_relinquishes_retire);
60279+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
60280
60281 if (!cookie) {
60282- fscache_stat(&fscache_n_relinquishes_null);
60283+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
60284 _leave(" [no cookie]");
60285 return;
60286 }
60287@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
60288 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
60289 goto inconsistent;
60290
60291- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60292+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60293
60294 __fscache_use_cookie(cookie);
60295 if (fscache_submit_op(object, op) < 0)
60296diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
60297index 4226f66..0fb3f45 100644
60298--- a/fs/fscache/internal.h
60299+++ b/fs/fscache/internal.h
60300@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
60301 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
60302 extern int fscache_wait_for_operation_activation(struct fscache_object *,
60303 struct fscache_operation *,
60304- atomic_t *,
60305- atomic_t *,
60306+ atomic_unchecked_t *,
60307+ atomic_unchecked_t *,
60308 void (*)(struct fscache_operation *));
60309 extern void fscache_invalidate_writes(struct fscache_cookie *);
60310
60311@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
60312 * stats.c
60313 */
60314 #ifdef CONFIG_FSCACHE_STATS
60315-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
60316-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
60317+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
60318+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
60319
60320-extern atomic_t fscache_n_op_pend;
60321-extern atomic_t fscache_n_op_run;
60322-extern atomic_t fscache_n_op_enqueue;
60323-extern atomic_t fscache_n_op_deferred_release;
60324-extern atomic_t fscache_n_op_release;
60325-extern atomic_t fscache_n_op_gc;
60326-extern atomic_t fscache_n_op_cancelled;
60327-extern atomic_t fscache_n_op_rejected;
60328+extern atomic_unchecked_t fscache_n_op_pend;
60329+extern atomic_unchecked_t fscache_n_op_run;
60330+extern atomic_unchecked_t fscache_n_op_enqueue;
60331+extern atomic_unchecked_t fscache_n_op_deferred_release;
60332+extern atomic_unchecked_t fscache_n_op_release;
60333+extern atomic_unchecked_t fscache_n_op_gc;
60334+extern atomic_unchecked_t fscache_n_op_cancelled;
60335+extern atomic_unchecked_t fscache_n_op_rejected;
60336
60337-extern atomic_t fscache_n_attr_changed;
60338-extern atomic_t fscache_n_attr_changed_ok;
60339-extern atomic_t fscache_n_attr_changed_nobufs;
60340-extern atomic_t fscache_n_attr_changed_nomem;
60341-extern atomic_t fscache_n_attr_changed_calls;
60342+extern atomic_unchecked_t fscache_n_attr_changed;
60343+extern atomic_unchecked_t fscache_n_attr_changed_ok;
60344+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
60345+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
60346+extern atomic_unchecked_t fscache_n_attr_changed_calls;
60347
60348-extern atomic_t fscache_n_allocs;
60349-extern atomic_t fscache_n_allocs_ok;
60350-extern atomic_t fscache_n_allocs_wait;
60351-extern atomic_t fscache_n_allocs_nobufs;
60352-extern atomic_t fscache_n_allocs_intr;
60353-extern atomic_t fscache_n_allocs_object_dead;
60354-extern atomic_t fscache_n_alloc_ops;
60355-extern atomic_t fscache_n_alloc_op_waits;
60356+extern atomic_unchecked_t fscache_n_allocs;
60357+extern atomic_unchecked_t fscache_n_allocs_ok;
60358+extern atomic_unchecked_t fscache_n_allocs_wait;
60359+extern atomic_unchecked_t fscache_n_allocs_nobufs;
60360+extern atomic_unchecked_t fscache_n_allocs_intr;
60361+extern atomic_unchecked_t fscache_n_allocs_object_dead;
60362+extern atomic_unchecked_t fscache_n_alloc_ops;
60363+extern atomic_unchecked_t fscache_n_alloc_op_waits;
60364
60365-extern atomic_t fscache_n_retrievals;
60366-extern atomic_t fscache_n_retrievals_ok;
60367-extern atomic_t fscache_n_retrievals_wait;
60368-extern atomic_t fscache_n_retrievals_nodata;
60369-extern atomic_t fscache_n_retrievals_nobufs;
60370-extern atomic_t fscache_n_retrievals_intr;
60371-extern atomic_t fscache_n_retrievals_nomem;
60372-extern atomic_t fscache_n_retrievals_object_dead;
60373-extern atomic_t fscache_n_retrieval_ops;
60374-extern atomic_t fscache_n_retrieval_op_waits;
60375+extern atomic_unchecked_t fscache_n_retrievals;
60376+extern atomic_unchecked_t fscache_n_retrievals_ok;
60377+extern atomic_unchecked_t fscache_n_retrievals_wait;
60378+extern atomic_unchecked_t fscache_n_retrievals_nodata;
60379+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
60380+extern atomic_unchecked_t fscache_n_retrievals_intr;
60381+extern atomic_unchecked_t fscache_n_retrievals_nomem;
60382+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
60383+extern atomic_unchecked_t fscache_n_retrieval_ops;
60384+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
60385
60386-extern atomic_t fscache_n_stores;
60387-extern atomic_t fscache_n_stores_ok;
60388-extern atomic_t fscache_n_stores_again;
60389-extern atomic_t fscache_n_stores_nobufs;
60390-extern atomic_t fscache_n_stores_oom;
60391-extern atomic_t fscache_n_store_ops;
60392-extern atomic_t fscache_n_store_calls;
60393-extern atomic_t fscache_n_store_pages;
60394-extern atomic_t fscache_n_store_radix_deletes;
60395-extern atomic_t fscache_n_store_pages_over_limit;
60396+extern atomic_unchecked_t fscache_n_stores;
60397+extern atomic_unchecked_t fscache_n_stores_ok;
60398+extern atomic_unchecked_t fscache_n_stores_again;
60399+extern atomic_unchecked_t fscache_n_stores_nobufs;
60400+extern atomic_unchecked_t fscache_n_stores_oom;
60401+extern atomic_unchecked_t fscache_n_store_ops;
60402+extern atomic_unchecked_t fscache_n_store_calls;
60403+extern atomic_unchecked_t fscache_n_store_pages;
60404+extern atomic_unchecked_t fscache_n_store_radix_deletes;
60405+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
60406
60407-extern atomic_t fscache_n_store_vmscan_not_storing;
60408-extern atomic_t fscache_n_store_vmscan_gone;
60409-extern atomic_t fscache_n_store_vmscan_busy;
60410-extern atomic_t fscache_n_store_vmscan_cancelled;
60411-extern atomic_t fscache_n_store_vmscan_wait;
60412+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
60413+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
60414+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
60415+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
60416+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
60417
60418-extern atomic_t fscache_n_marks;
60419-extern atomic_t fscache_n_uncaches;
60420+extern atomic_unchecked_t fscache_n_marks;
60421+extern atomic_unchecked_t fscache_n_uncaches;
60422
60423-extern atomic_t fscache_n_acquires;
60424-extern atomic_t fscache_n_acquires_null;
60425-extern atomic_t fscache_n_acquires_no_cache;
60426-extern atomic_t fscache_n_acquires_ok;
60427-extern atomic_t fscache_n_acquires_nobufs;
60428-extern atomic_t fscache_n_acquires_oom;
60429+extern atomic_unchecked_t fscache_n_acquires;
60430+extern atomic_unchecked_t fscache_n_acquires_null;
60431+extern atomic_unchecked_t fscache_n_acquires_no_cache;
60432+extern atomic_unchecked_t fscache_n_acquires_ok;
60433+extern atomic_unchecked_t fscache_n_acquires_nobufs;
60434+extern atomic_unchecked_t fscache_n_acquires_oom;
60435
60436-extern atomic_t fscache_n_invalidates;
60437-extern atomic_t fscache_n_invalidates_run;
60438+extern atomic_unchecked_t fscache_n_invalidates;
60439+extern atomic_unchecked_t fscache_n_invalidates_run;
60440
60441-extern atomic_t fscache_n_updates;
60442-extern atomic_t fscache_n_updates_null;
60443-extern atomic_t fscache_n_updates_run;
60444+extern atomic_unchecked_t fscache_n_updates;
60445+extern atomic_unchecked_t fscache_n_updates_null;
60446+extern atomic_unchecked_t fscache_n_updates_run;
60447
60448-extern atomic_t fscache_n_relinquishes;
60449-extern atomic_t fscache_n_relinquishes_null;
60450-extern atomic_t fscache_n_relinquishes_waitcrt;
60451-extern atomic_t fscache_n_relinquishes_retire;
60452+extern atomic_unchecked_t fscache_n_relinquishes;
60453+extern atomic_unchecked_t fscache_n_relinquishes_null;
60454+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
60455+extern atomic_unchecked_t fscache_n_relinquishes_retire;
60456
60457-extern atomic_t fscache_n_cookie_index;
60458-extern atomic_t fscache_n_cookie_data;
60459-extern atomic_t fscache_n_cookie_special;
60460+extern atomic_unchecked_t fscache_n_cookie_index;
60461+extern atomic_unchecked_t fscache_n_cookie_data;
60462+extern atomic_unchecked_t fscache_n_cookie_special;
60463
60464-extern atomic_t fscache_n_object_alloc;
60465-extern atomic_t fscache_n_object_no_alloc;
60466-extern atomic_t fscache_n_object_lookups;
60467-extern atomic_t fscache_n_object_lookups_negative;
60468-extern atomic_t fscache_n_object_lookups_positive;
60469-extern atomic_t fscache_n_object_lookups_timed_out;
60470-extern atomic_t fscache_n_object_created;
60471-extern atomic_t fscache_n_object_avail;
60472-extern atomic_t fscache_n_object_dead;
60473+extern atomic_unchecked_t fscache_n_object_alloc;
60474+extern atomic_unchecked_t fscache_n_object_no_alloc;
60475+extern atomic_unchecked_t fscache_n_object_lookups;
60476+extern atomic_unchecked_t fscache_n_object_lookups_negative;
60477+extern atomic_unchecked_t fscache_n_object_lookups_positive;
60478+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
60479+extern atomic_unchecked_t fscache_n_object_created;
60480+extern atomic_unchecked_t fscache_n_object_avail;
60481+extern atomic_unchecked_t fscache_n_object_dead;
60482
60483-extern atomic_t fscache_n_checkaux_none;
60484-extern atomic_t fscache_n_checkaux_okay;
60485-extern atomic_t fscache_n_checkaux_update;
60486-extern atomic_t fscache_n_checkaux_obsolete;
60487+extern atomic_unchecked_t fscache_n_checkaux_none;
60488+extern atomic_unchecked_t fscache_n_checkaux_okay;
60489+extern atomic_unchecked_t fscache_n_checkaux_update;
60490+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
60491
60492 extern atomic_t fscache_n_cop_alloc_object;
60493 extern atomic_t fscache_n_cop_lookup_object;
60494@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
60495 atomic_inc(stat);
60496 }
60497
60498+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
60499+{
60500+ atomic_inc_unchecked(stat);
60501+}
60502+
60503 static inline void fscache_stat_d(atomic_t *stat)
60504 {
60505 atomic_dec(stat);
60506@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
60507
60508 #define __fscache_stat(stat) (NULL)
60509 #define fscache_stat(stat) do {} while (0)
60510+#define fscache_stat_unchecked(stat) do {} while (0)
60511 #define fscache_stat_d(stat) do {} while (0)
60512 #endif
60513
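
The bulk of the fscache changes are mechanical: every purely statistical counter moves from atomic_t to atomic_unchecked_t, with matching fscache_stat_unchecked()/atomic_read_unchecked() accessors, while the fscache_n_cop_* counters, which fscache_stat()/fscache_stat_d() increment and decrement in balanced pairs, stay checked. Under PaX's REFCOUNT hardening, atomic_t overflow is detected and treated as a refcount error; monotonic event counters and debug-ID generators (fscache_op_debug_id, fscache_object_debug_id) may legitimately wrap, so they are opted out. A minimal userspace model of the unchecked type (the real definitions live in the patched arch atomic headers, not here):

/* same representation as atomic_t; "unchecked" only means the
 * REFCOUNT overflow instrumentation is not applied to it */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* post-increment value: good enough for unique debug IDs,
	 * where wrapping after 2^32 allocations is harmless */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
{
	atomic_inc_unchecked(stat);
}

This is why the object.c, operation.c, page.c and stats.c hunks below read as a pure fscache_stat() to fscache_stat_unchecked() substitution for the statistics counters, with the paired cop counters left untouched.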
60514diff --git a/fs/fscache/object.c b/fs/fscache/object.c
60515index d3b4539..ed0c659 100644
60516--- a/fs/fscache/object.c
60517+++ b/fs/fscache/object.c
60518@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
60519 _debug("LOOKUP \"%s\" in \"%s\"",
60520 cookie->def->name, object->cache->tag->name);
60521
60522- fscache_stat(&fscache_n_object_lookups);
60523+ fscache_stat_unchecked(&fscache_n_object_lookups);
60524 fscache_stat(&fscache_n_cop_lookup_object);
60525 ret = object->cache->ops->lookup_object(object);
60526 fscache_stat_d(&fscache_n_cop_lookup_object);
60527@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
60528 if (ret == -ETIMEDOUT) {
60529 /* probably stuck behind another object, so move this one to
60530 * the back of the queue */
60531- fscache_stat(&fscache_n_object_lookups_timed_out);
60532+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
60533 _leave(" [timeout]");
60534 return NO_TRANSIT;
60535 }
60536@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
60537 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
60538
60539 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
60540- fscache_stat(&fscache_n_object_lookups_negative);
60541+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
60542
60543 /* Allow write requests to begin stacking up and read requests to begin
60544 * returning ENODATA.
60545@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
60546 /* if we were still looking up, then we must have a positive lookup
60547 * result, in which case there may be data available */
60548 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
60549- fscache_stat(&fscache_n_object_lookups_positive);
60550+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
60551
60552 /* We do (presumably) have data */
60553 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
60554@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
60555 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
60556 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
60557 } else {
60558- fscache_stat(&fscache_n_object_created);
60559+ fscache_stat_unchecked(&fscache_n_object_created);
60560 }
60561
60562 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
60563@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
60564 fscache_stat_d(&fscache_n_cop_lookup_complete);
60565
60566 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
60567- fscache_stat(&fscache_n_object_avail);
60568+ fscache_stat_unchecked(&fscache_n_object_avail);
60569
60570 _leave("");
60571 return transit_to(JUMPSTART_DEPS);
60572@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
60573
60574 /* this just shifts the object release to the work processor */
60575 fscache_put_object(object);
60576- fscache_stat(&fscache_n_object_dead);
60577+ fscache_stat_unchecked(&fscache_n_object_dead);
60578
60579 _leave("");
60580 return transit_to(OBJECT_DEAD);
60581@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
60582 enum fscache_checkaux result;
60583
60584 if (!object->cookie->def->check_aux) {
60585- fscache_stat(&fscache_n_checkaux_none);
60586+ fscache_stat_unchecked(&fscache_n_checkaux_none);
60587 return FSCACHE_CHECKAUX_OKAY;
60588 }
60589
60590@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
60591 switch (result) {
60592 /* entry okay as is */
60593 case FSCACHE_CHECKAUX_OKAY:
60594- fscache_stat(&fscache_n_checkaux_okay);
60595+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
60596 break;
60597
60598 /* entry requires update */
60599 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
60600- fscache_stat(&fscache_n_checkaux_update);
60601+ fscache_stat_unchecked(&fscache_n_checkaux_update);
60602 break;
60603
60604 /* entry requires deletion */
60605 case FSCACHE_CHECKAUX_OBSOLETE:
60606- fscache_stat(&fscache_n_checkaux_obsolete);
60607+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
60608 break;
60609
60610 default:
60611@@ -992,7 +992,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
60612 {
60613 const struct fscache_state *s;
60614
60615- fscache_stat(&fscache_n_invalidates_run);
60616+ fscache_stat_unchecked(&fscache_n_invalidates_run);
60617 fscache_stat(&fscache_n_cop_invalidate_object);
60618 s = _fscache_invalidate_object(object, event);
60619 fscache_stat_d(&fscache_n_cop_invalidate_object);
60620@@ -1007,7 +1007,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
60621 {
60622 _enter("{OBJ%x},%d", object->debug_id, event);
60623
60624- fscache_stat(&fscache_n_updates_run);
60625+ fscache_stat_unchecked(&fscache_n_updates_run);
60626 fscache_stat(&fscache_n_cop_update_object);
60627 object->cache->ops->update_object(object);
60628 fscache_stat_d(&fscache_n_cop_update_object);
60629diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
60630index 318071a..379938b 100644
60631--- a/fs/fscache/operation.c
60632+++ b/fs/fscache/operation.c
60633@@ -17,7 +17,7 @@
60634 #include <linux/slab.h>
60635 #include "internal.h"
60636
60637-atomic_t fscache_op_debug_id;
60638+atomic_unchecked_t fscache_op_debug_id;
60639 EXPORT_SYMBOL(fscache_op_debug_id);
60640
60641 /**
60642@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
60643 ASSERTCMP(atomic_read(&op->usage), >, 0);
60644 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
60645
60646- fscache_stat(&fscache_n_op_enqueue);
60647+ fscache_stat_unchecked(&fscache_n_op_enqueue);
60648 switch (op->flags & FSCACHE_OP_TYPE) {
60649 case FSCACHE_OP_ASYNC:
60650 _debug("queue async");
60651@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
60652 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
60653 if (op->processor)
60654 fscache_enqueue_operation(op);
60655- fscache_stat(&fscache_n_op_run);
60656+ fscache_stat_unchecked(&fscache_n_op_run);
60657 }
60658
60659 /*
60660@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
60661 if (object->n_in_progress > 0) {
60662 atomic_inc(&op->usage);
60663 list_add_tail(&op->pend_link, &object->pending_ops);
60664- fscache_stat(&fscache_n_op_pend);
60665+ fscache_stat_unchecked(&fscache_n_op_pend);
60666 } else if (!list_empty(&object->pending_ops)) {
60667 atomic_inc(&op->usage);
60668 list_add_tail(&op->pend_link, &object->pending_ops);
60669- fscache_stat(&fscache_n_op_pend);
60670+ fscache_stat_unchecked(&fscache_n_op_pend);
60671 fscache_start_operations(object);
60672 } else {
60673 ASSERTCMP(object->n_in_progress, ==, 0);
60674@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
60675 object->n_exclusive++; /* reads and writes must wait */
60676 atomic_inc(&op->usage);
60677 list_add_tail(&op->pend_link, &object->pending_ops);
60678- fscache_stat(&fscache_n_op_pend);
60679+ fscache_stat_unchecked(&fscache_n_op_pend);
60680 ret = 0;
60681 } else {
60682 /* If we're in any other state, there must have been an I/O
60683@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
60684 if (object->n_exclusive > 0) {
60685 atomic_inc(&op->usage);
60686 list_add_tail(&op->pend_link, &object->pending_ops);
60687- fscache_stat(&fscache_n_op_pend);
60688+ fscache_stat_unchecked(&fscache_n_op_pend);
60689 } else if (!list_empty(&object->pending_ops)) {
60690 atomic_inc(&op->usage);
60691 list_add_tail(&op->pend_link, &object->pending_ops);
60692- fscache_stat(&fscache_n_op_pend);
60693+ fscache_stat_unchecked(&fscache_n_op_pend);
60694 fscache_start_operations(object);
60695 } else {
60696 ASSERTCMP(object->n_exclusive, ==, 0);
60697@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
60698 object->n_ops++;
60699 atomic_inc(&op->usage);
60700 list_add_tail(&op->pend_link, &object->pending_ops);
60701- fscache_stat(&fscache_n_op_pend);
60702+ fscache_stat_unchecked(&fscache_n_op_pend);
60703 ret = 0;
60704 } else if (fscache_object_is_dying(object)) {
60705- fscache_stat(&fscache_n_op_rejected);
60706+ fscache_stat_unchecked(&fscache_n_op_rejected);
60707 op->state = FSCACHE_OP_ST_CANCELLED;
60708 ret = -ENOBUFS;
60709 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
60710@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
60711 ret = -EBUSY;
60712 if (op->state == FSCACHE_OP_ST_PENDING) {
60713 ASSERT(!list_empty(&op->pend_link));
60714- fscache_stat(&fscache_n_op_cancelled);
60715+ fscache_stat_unchecked(&fscache_n_op_cancelled);
60716 list_del_init(&op->pend_link);
60717 if (do_cancel)
60718 do_cancel(op);
60719@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
60720 while (!list_empty(&object->pending_ops)) {
60721 op = list_entry(object->pending_ops.next,
60722 struct fscache_operation, pend_link);
60723- fscache_stat(&fscache_n_op_cancelled);
60724+ fscache_stat_unchecked(&fscache_n_op_cancelled);
60725 list_del_init(&op->pend_link);
60726
60727 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
60728@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
60729 op->state, ==, FSCACHE_OP_ST_CANCELLED);
60730 op->state = FSCACHE_OP_ST_DEAD;
60731
60732- fscache_stat(&fscache_n_op_release);
60733+ fscache_stat_unchecked(&fscache_n_op_release);
60734
60735 if (op->release) {
60736 op->release(op);
60737@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
60738 * lock, and defer it otherwise */
60739 if (!spin_trylock(&object->lock)) {
60740 _debug("defer put");
60741- fscache_stat(&fscache_n_op_deferred_release);
60742+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
60743
60744 cache = object->cache;
60745 spin_lock(&cache->op_gc_list_lock);
60746@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
60747
60748 _debug("GC DEFERRED REL OBJ%x OP%x",
60749 object->debug_id, op->debug_id);
60750- fscache_stat(&fscache_n_op_gc);
60751+ fscache_stat_unchecked(&fscache_n_op_gc);
60752
60753 ASSERTCMP(atomic_read(&op->usage), ==, 0);
60754 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
60755diff --git a/fs/fscache/page.c b/fs/fscache/page.c
60756index 7f5c658..6c1e164 100644
60757--- a/fs/fscache/page.c
60758+++ b/fs/fscache/page.c
60759@@ -61,7 +61,7 @@ try_again:
60760 val = radix_tree_lookup(&cookie->stores, page->index);
60761 if (!val) {
60762 rcu_read_unlock();
60763- fscache_stat(&fscache_n_store_vmscan_not_storing);
60764+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
60765 __fscache_uncache_page(cookie, page);
60766 return true;
60767 }
60768@@ -91,11 +91,11 @@ try_again:
60769 spin_unlock(&cookie->stores_lock);
60770
60771 if (xpage) {
60772- fscache_stat(&fscache_n_store_vmscan_cancelled);
60773- fscache_stat(&fscache_n_store_radix_deletes);
60774+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
60775+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
60776 ASSERTCMP(xpage, ==, page);
60777 } else {
60778- fscache_stat(&fscache_n_store_vmscan_gone);
60779+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
60780 }
60781
60782 wake_up_bit(&cookie->flags, 0);
60783@@ -110,11 +110,11 @@ page_busy:
60784 * sleeping on memory allocation, so we may need to impose a timeout
60785 * too. */
60786 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
60787- fscache_stat(&fscache_n_store_vmscan_busy);
60788+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
60789 return false;
60790 }
60791
60792- fscache_stat(&fscache_n_store_vmscan_wait);
60793+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
60794 __fscache_wait_on_page_write(cookie, page);
60795 gfp &= ~__GFP_WAIT;
60796 goto try_again;
60797@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
60798 FSCACHE_COOKIE_STORING_TAG);
60799 if (!radix_tree_tag_get(&cookie->stores, page->index,
60800 FSCACHE_COOKIE_PENDING_TAG)) {
60801- fscache_stat(&fscache_n_store_radix_deletes);
60802+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
60803 xpage = radix_tree_delete(&cookie->stores, page->index);
60804 }
60805 spin_unlock(&cookie->stores_lock);
60806@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
60807
60808 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
60809
60810- fscache_stat(&fscache_n_attr_changed_calls);
60811+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
60812
60813 if (fscache_object_is_active(object)) {
60814 fscache_stat(&fscache_n_cop_attr_changed);
60815@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60816
60817 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60818
60819- fscache_stat(&fscache_n_attr_changed);
60820+ fscache_stat_unchecked(&fscache_n_attr_changed);
60821
60822 op = kzalloc(sizeof(*op), GFP_KERNEL);
60823 if (!op) {
60824- fscache_stat(&fscache_n_attr_changed_nomem);
60825+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
60826 _leave(" = -ENOMEM");
60827 return -ENOMEM;
60828 }
60829@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60830 if (fscache_submit_exclusive_op(object, op) < 0)
60831 goto nobufs;
60832 spin_unlock(&cookie->lock);
60833- fscache_stat(&fscache_n_attr_changed_ok);
60834+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
60835 fscache_put_operation(op);
60836 _leave(" = 0");
60837 return 0;
60838@@ -225,7 +225,7 @@ nobufs:
60839 kfree(op);
60840 if (wake_cookie)
60841 __fscache_wake_unused_cookie(cookie);
60842- fscache_stat(&fscache_n_attr_changed_nobufs);
60843+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
60844 _leave(" = %d", -ENOBUFS);
60845 return -ENOBUFS;
60846 }
60847@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
60848 /* allocate a retrieval operation and attempt to submit it */
60849 op = kzalloc(sizeof(*op), GFP_NOIO);
60850 if (!op) {
60851- fscache_stat(&fscache_n_retrievals_nomem);
60852+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60853 return NULL;
60854 }
60855
60856@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
60857 return 0;
60858 }
60859
60860- fscache_stat(&fscache_n_retrievals_wait);
60861+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
60862
60863 jif = jiffies;
60864 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
60865 fscache_wait_bit_interruptible,
60866 TASK_INTERRUPTIBLE) != 0) {
60867- fscache_stat(&fscache_n_retrievals_intr);
60868+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60869 _leave(" = -ERESTARTSYS");
60870 return -ERESTARTSYS;
60871 }
60872@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
60873 */
60874 int fscache_wait_for_operation_activation(struct fscache_object *object,
60875 struct fscache_operation *op,
60876- atomic_t *stat_op_waits,
60877- atomic_t *stat_object_dead,
60878+ atomic_unchecked_t *stat_op_waits,
60879+ atomic_unchecked_t *stat_object_dead,
60880 void (*do_cancel)(struct fscache_operation *))
60881 {
60882 int ret;
60883@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60884
60885 _debug(">>> WT");
60886 if (stat_op_waits)
60887- fscache_stat(stat_op_waits);
60888+ fscache_stat_unchecked(stat_op_waits);
60889 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
60890 fscache_wait_bit_interruptible,
60891 TASK_INTERRUPTIBLE) != 0) {
60892@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60893 check_if_dead:
60894 if (op->state == FSCACHE_OP_ST_CANCELLED) {
60895 if (stat_object_dead)
60896- fscache_stat(stat_object_dead);
60897+ fscache_stat_unchecked(stat_object_dead);
60898 _leave(" = -ENOBUFS [cancelled]");
60899 return -ENOBUFS;
60900 }
60901@@ -366,7 +366,7 @@ check_if_dead:
60902 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
60903 fscache_cancel_op(op, do_cancel);
60904 if (stat_object_dead)
60905- fscache_stat(stat_object_dead);
60906+ fscache_stat_unchecked(stat_object_dead);
60907 return -ENOBUFS;
60908 }
60909 return 0;
60910@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60911
60912 _enter("%p,%p,,,", cookie, page);
60913
60914- fscache_stat(&fscache_n_retrievals);
60915+ fscache_stat_unchecked(&fscache_n_retrievals);
60916
60917 if (hlist_empty(&cookie->backing_objects))
60918 goto nobufs;
60919@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60920 goto nobufs_unlock_dec;
60921 spin_unlock(&cookie->lock);
60922
60923- fscache_stat(&fscache_n_retrieval_ops);
60924+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
60925
60926 /* pin the netfs read context in case we need to do the actual netfs
60927 * read because we've encountered a cache read failure */
60928@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60929
60930 error:
60931 if (ret == -ENOMEM)
60932- fscache_stat(&fscache_n_retrievals_nomem);
60933+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60934 else if (ret == -ERESTARTSYS)
60935- fscache_stat(&fscache_n_retrievals_intr);
60936+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60937 else if (ret == -ENODATA)
60938- fscache_stat(&fscache_n_retrievals_nodata);
60939+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60940 else if (ret < 0)
60941- fscache_stat(&fscache_n_retrievals_nobufs);
60942+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60943 else
60944- fscache_stat(&fscache_n_retrievals_ok);
60945+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
60946
60947 fscache_put_retrieval(op);
60948 _leave(" = %d", ret);
60949@@ -490,7 +490,7 @@ nobufs_unlock:
60950 __fscache_wake_unused_cookie(cookie);
60951 kfree(op);
60952 nobufs:
60953- fscache_stat(&fscache_n_retrievals_nobufs);
60954+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60955 _leave(" = -ENOBUFS");
60956 return -ENOBUFS;
60957 }
60958@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60959
60960 _enter("%p,,%d,,,", cookie, *nr_pages);
60961
60962- fscache_stat(&fscache_n_retrievals);
60963+ fscache_stat_unchecked(&fscache_n_retrievals);
60964
60965 if (hlist_empty(&cookie->backing_objects))
60966 goto nobufs;
60967@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60968 goto nobufs_unlock_dec;
60969 spin_unlock(&cookie->lock);
60970
60971- fscache_stat(&fscache_n_retrieval_ops);
60972+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
60973
60974 /* pin the netfs read context in case we need to do the actual netfs
60975 * read because we've encountered a cache read failure */
60976@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60977
60978 error:
60979 if (ret == -ENOMEM)
60980- fscache_stat(&fscache_n_retrievals_nomem);
60981+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60982 else if (ret == -ERESTARTSYS)
60983- fscache_stat(&fscache_n_retrievals_intr);
60984+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60985 else if (ret == -ENODATA)
60986- fscache_stat(&fscache_n_retrievals_nodata);
60987+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60988 else if (ret < 0)
60989- fscache_stat(&fscache_n_retrievals_nobufs);
60990+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60991 else
60992- fscache_stat(&fscache_n_retrievals_ok);
60993+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
60994
60995 fscache_put_retrieval(op);
60996 _leave(" = %d", ret);
60997@@ -621,7 +621,7 @@ nobufs_unlock:
60998 if (wake_cookie)
60999 __fscache_wake_unused_cookie(cookie);
61000 nobufs:
61001- fscache_stat(&fscache_n_retrievals_nobufs);
61002+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
61003 _leave(" = -ENOBUFS");
61004 return -ENOBUFS;
61005 }
61006@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
61007
61008 _enter("%p,%p,,,", cookie, page);
61009
61010- fscache_stat(&fscache_n_allocs);
61011+ fscache_stat_unchecked(&fscache_n_allocs);
61012
61013 if (hlist_empty(&cookie->backing_objects))
61014 goto nobufs;
61015@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
61016 goto nobufs_unlock_dec;
61017 spin_unlock(&cookie->lock);
61018
61019- fscache_stat(&fscache_n_alloc_ops);
61020+ fscache_stat_unchecked(&fscache_n_alloc_ops);
61021
61022 ret = fscache_wait_for_operation_activation(
61023 object, &op->op,
61024@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
61025
61026 error:
61027 if (ret == -ERESTARTSYS)
61028- fscache_stat(&fscache_n_allocs_intr);
61029+ fscache_stat_unchecked(&fscache_n_allocs_intr);
61030 else if (ret < 0)
61031- fscache_stat(&fscache_n_allocs_nobufs);
61032+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
61033 else
61034- fscache_stat(&fscache_n_allocs_ok);
61035+ fscache_stat_unchecked(&fscache_n_allocs_ok);
61036
61037 fscache_put_retrieval(op);
61038 _leave(" = %d", ret);
61039@@ -715,7 +715,7 @@ nobufs_unlock:
61040 if (wake_cookie)
61041 __fscache_wake_unused_cookie(cookie);
61042 nobufs:
61043- fscache_stat(&fscache_n_allocs_nobufs);
61044+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
61045 _leave(" = -ENOBUFS");
61046 return -ENOBUFS;
61047 }
61048@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
61049
61050 spin_lock(&cookie->stores_lock);
61051
61052- fscache_stat(&fscache_n_store_calls);
61053+ fscache_stat_unchecked(&fscache_n_store_calls);
61054
61055 /* find a page to store */
61056 page = NULL;
61057@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
61058 page = results[0];
61059 _debug("gang %d [%lx]", n, page->index);
61060 if (page->index > op->store_limit) {
61061- fscache_stat(&fscache_n_store_pages_over_limit);
61062+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
61063 goto superseded;
61064 }
61065
61066@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
61067 spin_unlock(&cookie->stores_lock);
61068 spin_unlock(&object->lock);
61069
61070- fscache_stat(&fscache_n_store_pages);
61071+ fscache_stat_unchecked(&fscache_n_store_pages);
61072 fscache_stat(&fscache_n_cop_write_page);
61073 ret = object->cache->ops->write_page(op, page);
61074 fscache_stat_d(&fscache_n_cop_write_page);
61075@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
61076 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
61077 ASSERT(PageFsCache(page));
61078
61079- fscache_stat(&fscache_n_stores);
61080+ fscache_stat_unchecked(&fscache_n_stores);
61081
61082 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
61083 _leave(" = -ENOBUFS [invalidating]");
61084@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
61085 spin_unlock(&cookie->stores_lock);
61086 spin_unlock(&object->lock);
61087
61088- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
61089+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61090 op->store_limit = object->store_limit;
61091
61092 __fscache_use_cookie(cookie);
61093@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
61094
61095 spin_unlock(&cookie->lock);
61096 radix_tree_preload_end();
61097- fscache_stat(&fscache_n_store_ops);
61098- fscache_stat(&fscache_n_stores_ok);
61099+ fscache_stat_unchecked(&fscache_n_store_ops);
61100+ fscache_stat_unchecked(&fscache_n_stores_ok);
61101
61102 /* the work queue now carries its own ref on the object */
61103 fscache_put_operation(&op->op);
61104@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
61105 return 0;
61106
61107 already_queued:
61108- fscache_stat(&fscache_n_stores_again);
61109+ fscache_stat_unchecked(&fscache_n_stores_again);
61110 already_pending:
61111 spin_unlock(&cookie->stores_lock);
61112 spin_unlock(&object->lock);
61113 spin_unlock(&cookie->lock);
61114 radix_tree_preload_end();
61115 kfree(op);
61116- fscache_stat(&fscache_n_stores_ok);
61117+ fscache_stat_unchecked(&fscache_n_stores_ok);
61118 _leave(" = 0");
61119 return 0;
61120
61121@@ -1024,14 +1024,14 @@ nobufs:
61122 kfree(op);
61123 if (wake_cookie)
61124 __fscache_wake_unused_cookie(cookie);
61125- fscache_stat(&fscache_n_stores_nobufs);
61126+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
61127 _leave(" = -ENOBUFS");
61128 return -ENOBUFS;
61129
61130 nomem_free:
61131 kfree(op);
61132 nomem:
61133- fscache_stat(&fscache_n_stores_oom);
61134+ fscache_stat_unchecked(&fscache_n_stores_oom);
61135 _leave(" = -ENOMEM");
61136 return -ENOMEM;
61137 }
61138@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
61139 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
61140 ASSERTCMP(page, !=, NULL);
61141
61142- fscache_stat(&fscache_n_uncaches);
61143+ fscache_stat_unchecked(&fscache_n_uncaches);
61144
61145 /* cache withdrawal may beat us to it */
61146 if (!PageFsCache(page))
61147@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
61148 struct fscache_cookie *cookie = op->op.object->cookie;
61149
61150 #ifdef CONFIG_FSCACHE_STATS
61151- atomic_inc(&fscache_n_marks);
61152+ atomic_inc_unchecked(&fscache_n_marks);
61153 #endif
61154
61155 _debug("- mark %p{%lx}", page, page->index);
61156diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
61157index 40d13c7..ddf52b9 100644
61158--- a/fs/fscache/stats.c
61159+++ b/fs/fscache/stats.c
61160@@ -18,99 +18,99 @@
61161 /*
61162 * operation counters
61163 */
61164-atomic_t fscache_n_op_pend;
61165-atomic_t fscache_n_op_run;
61166-atomic_t fscache_n_op_enqueue;
61167-atomic_t fscache_n_op_requeue;
61168-atomic_t fscache_n_op_deferred_release;
61169-atomic_t fscache_n_op_release;
61170-atomic_t fscache_n_op_gc;
61171-atomic_t fscache_n_op_cancelled;
61172-atomic_t fscache_n_op_rejected;
61173+atomic_unchecked_t fscache_n_op_pend;
61174+atomic_unchecked_t fscache_n_op_run;
61175+atomic_unchecked_t fscache_n_op_enqueue;
61176+atomic_unchecked_t fscache_n_op_requeue;
61177+atomic_unchecked_t fscache_n_op_deferred_release;
61178+atomic_unchecked_t fscache_n_op_release;
61179+atomic_unchecked_t fscache_n_op_gc;
61180+atomic_unchecked_t fscache_n_op_cancelled;
61181+atomic_unchecked_t fscache_n_op_rejected;
61182
61183-atomic_t fscache_n_attr_changed;
61184-atomic_t fscache_n_attr_changed_ok;
61185-atomic_t fscache_n_attr_changed_nobufs;
61186-atomic_t fscache_n_attr_changed_nomem;
61187-atomic_t fscache_n_attr_changed_calls;
61188+atomic_unchecked_t fscache_n_attr_changed;
61189+atomic_unchecked_t fscache_n_attr_changed_ok;
61190+atomic_unchecked_t fscache_n_attr_changed_nobufs;
61191+atomic_unchecked_t fscache_n_attr_changed_nomem;
61192+atomic_unchecked_t fscache_n_attr_changed_calls;
61193
61194-atomic_t fscache_n_allocs;
61195-atomic_t fscache_n_allocs_ok;
61196-atomic_t fscache_n_allocs_wait;
61197-atomic_t fscache_n_allocs_nobufs;
61198-atomic_t fscache_n_allocs_intr;
61199-atomic_t fscache_n_allocs_object_dead;
61200-atomic_t fscache_n_alloc_ops;
61201-atomic_t fscache_n_alloc_op_waits;
61202+atomic_unchecked_t fscache_n_allocs;
61203+atomic_unchecked_t fscache_n_allocs_ok;
61204+atomic_unchecked_t fscache_n_allocs_wait;
61205+atomic_unchecked_t fscache_n_allocs_nobufs;
61206+atomic_unchecked_t fscache_n_allocs_intr;
61207+atomic_unchecked_t fscache_n_allocs_object_dead;
61208+atomic_unchecked_t fscache_n_alloc_ops;
61209+atomic_unchecked_t fscache_n_alloc_op_waits;
61210
61211-atomic_t fscache_n_retrievals;
61212-atomic_t fscache_n_retrievals_ok;
61213-atomic_t fscache_n_retrievals_wait;
61214-atomic_t fscache_n_retrievals_nodata;
61215-atomic_t fscache_n_retrievals_nobufs;
61216-atomic_t fscache_n_retrievals_intr;
61217-atomic_t fscache_n_retrievals_nomem;
61218-atomic_t fscache_n_retrievals_object_dead;
61219-atomic_t fscache_n_retrieval_ops;
61220-atomic_t fscache_n_retrieval_op_waits;
61221+atomic_unchecked_t fscache_n_retrievals;
61222+atomic_unchecked_t fscache_n_retrievals_ok;
61223+atomic_unchecked_t fscache_n_retrievals_wait;
61224+atomic_unchecked_t fscache_n_retrievals_nodata;
61225+atomic_unchecked_t fscache_n_retrievals_nobufs;
61226+atomic_unchecked_t fscache_n_retrievals_intr;
61227+atomic_unchecked_t fscache_n_retrievals_nomem;
61228+atomic_unchecked_t fscache_n_retrievals_object_dead;
61229+atomic_unchecked_t fscache_n_retrieval_ops;
61230+atomic_unchecked_t fscache_n_retrieval_op_waits;
61231
61232-atomic_t fscache_n_stores;
61233-atomic_t fscache_n_stores_ok;
61234-atomic_t fscache_n_stores_again;
61235-atomic_t fscache_n_stores_nobufs;
61236-atomic_t fscache_n_stores_oom;
61237-atomic_t fscache_n_store_ops;
61238-atomic_t fscache_n_store_calls;
61239-atomic_t fscache_n_store_pages;
61240-atomic_t fscache_n_store_radix_deletes;
61241-atomic_t fscache_n_store_pages_over_limit;
61242+atomic_unchecked_t fscache_n_stores;
61243+atomic_unchecked_t fscache_n_stores_ok;
61244+atomic_unchecked_t fscache_n_stores_again;
61245+atomic_unchecked_t fscache_n_stores_nobufs;
61246+atomic_unchecked_t fscache_n_stores_oom;
61247+atomic_unchecked_t fscache_n_store_ops;
61248+atomic_unchecked_t fscache_n_store_calls;
61249+atomic_unchecked_t fscache_n_store_pages;
61250+atomic_unchecked_t fscache_n_store_radix_deletes;
61251+atomic_unchecked_t fscache_n_store_pages_over_limit;
61252
61253-atomic_t fscache_n_store_vmscan_not_storing;
61254-atomic_t fscache_n_store_vmscan_gone;
61255-atomic_t fscache_n_store_vmscan_busy;
61256-atomic_t fscache_n_store_vmscan_cancelled;
61257-atomic_t fscache_n_store_vmscan_wait;
61258+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
61259+atomic_unchecked_t fscache_n_store_vmscan_gone;
61260+atomic_unchecked_t fscache_n_store_vmscan_busy;
61261+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
61262+atomic_unchecked_t fscache_n_store_vmscan_wait;
61263
61264-atomic_t fscache_n_marks;
61265-atomic_t fscache_n_uncaches;
61266+atomic_unchecked_t fscache_n_marks;
61267+atomic_unchecked_t fscache_n_uncaches;
61268
61269-atomic_t fscache_n_acquires;
61270-atomic_t fscache_n_acquires_null;
61271-atomic_t fscache_n_acquires_no_cache;
61272-atomic_t fscache_n_acquires_ok;
61273-atomic_t fscache_n_acquires_nobufs;
61274-atomic_t fscache_n_acquires_oom;
61275+atomic_unchecked_t fscache_n_acquires;
61276+atomic_unchecked_t fscache_n_acquires_null;
61277+atomic_unchecked_t fscache_n_acquires_no_cache;
61278+atomic_unchecked_t fscache_n_acquires_ok;
61279+atomic_unchecked_t fscache_n_acquires_nobufs;
61280+atomic_unchecked_t fscache_n_acquires_oom;
61281
61282-atomic_t fscache_n_invalidates;
61283-atomic_t fscache_n_invalidates_run;
61284+atomic_unchecked_t fscache_n_invalidates;
61285+atomic_unchecked_t fscache_n_invalidates_run;
61286
61287-atomic_t fscache_n_updates;
61288-atomic_t fscache_n_updates_null;
61289-atomic_t fscache_n_updates_run;
61290+atomic_unchecked_t fscache_n_updates;
61291+atomic_unchecked_t fscache_n_updates_null;
61292+atomic_unchecked_t fscache_n_updates_run;
61293
61294-atomic_t fscache_n_relinquishes;
61295-atomic_t fscache_n_relinquishes_null;
61296-atomic_t fscache_n_relinquishes_waitcrt;
61297-atomic_t fscache_n_relinquishes_retire;
61298+atomic_unchecked_t fscache_n_relinquishes;
61299+atomic_unchecked_t fscache_n_relinquishes_null;
61300+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
61301+atomic_unchecked_t fscache_n_relinquishes_retire;
61302
61303-atomic_t fscache_n_cookie_index;
61304-atomic_t fscache_n_cookie_data;
61305-atomic_t fscache_n_cookie_special;
61306+atomic_unchecked_t fscache_n_cookie_index;
61307+atomic_unchecked_t fscache_n_cookie_data;
61308+atomic_unchecked_t fscache_n_cookie_special;
61309
61310-atomic_t fscache_n_object_alloc;
61311-atomic_t fscache_n_object_no_alloc;
61312-atomic_t fscache_n_object_lookups;
61313-atomic_t fscache_n_object_lookups_negative;
61314-atomic_t fscache_n_object_lookups_positive;
61315-atomic_t fscache_n_object_lookups_timed_out;
61316-atomic_t fscache_n_object_created;
61317-atomic_t fscache_n_object_avail;
61318-atomic_t fscache_n_object_dead;
61319+atomic_unchecked_t fscache_n_object_alloc;
61320+atomic_unchecked_t fscache_n_object_no_alloc;
61321+atomic_unchecked_t fscache_n_object_lookups;
61322+atomic_unchecked_t fscache_n_object_lookups_negative;
61323+atomic_unchecked_t fscache_n_object_lookups_positive;
61324+atomic_unchecked_t fscache_n_object_lookups_timed_out;
61325+atomic_unchecked_t fscache_n_object_created;
61326+atomic_unchecked_t fscache_n_object_avail;
61327+atomic_unchecked_t fscache_n_object_dead;
61328
61329-atomic_t fscache_n_checkaux_none;
61330-atomic_t fscache_n_checkaux_okay;
61331-atomic_t fscache_n_checkaux_update;
61332-atomic_t fscache_n_checkaux_obsolete;
61333+atomic_unchecked_t fscache_n_checkaux_none;
61334+atomic_unchecked_t fscache_n_checkaux_okay;
61335+atomic_unchecked_t fscache_n_checkaux_update;
61336+atomic_unchecked_t fscache_n_checkaux_obsolete;
61337
61338 atomic_t fscache_n_cop_alloc_object;
61339 atomic_t fscache_n_cop_lookup_object;
61340@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
61341 seq_puts(m, "FS-Cache statistics\n");
61342
61343 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
61344- atomic_read(&fscache_n_cookie_index),
61345- atomic_read(&fscache_n_cookie_data),
61346- atomic_read(&fscache_n_cookie_special));
61347+ atomic_read_unchecked(&fscache_n_cookie_index),
61348+ atomic_read_unchecked(&fscache_n_cookie_data),
61349+ atomic_read_unchecked(&fscache_n_cookie_special));
61350
61351 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
61352- atomic_read(&fscache_n_object_alloc),
61353- atomic_read(&fscache_n_object_no_alloc),
61354- atomic_read(&fscache_n_object_avail),
61355- atomic_read(&fscache_n_object_dead));
61356+ atomic_read_unchecked(&fscache_n_object_alloc),
61357+ atomic_read_unchecked(&fscache_n_object_no_alloc),
61358+ atomic_read_unchecked(&fscache_n_object_avail),
61359+ atomic_read_unchecked(&fscache_n_object_dead));
61360 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
61361- atomic_read(&fscache_n_checkaux_none),
61362- atomic_read(&fscache_n_checkaux_okay),
61363- atomic_read(&fscache_n_checkaux_update),
61364- atomic_read(&fscache_n_checkaux_obsolete));
61365+ atomic_read_unchecked(&fscache_n_checkaux_none),
61366+ atomic_read_unchecked(&fscache_n_checkaux_okay),
61367+ atomic_read_unchecked(&fscache_n_checkaux_update),
61368+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
61369
61370 seq_printf(m, "Pages : mrk=%u unc=%u\n",
61371- atomic_read(&fscache_n_marks),
61372- atomic_read(&fscache_n_uncaches));
61373+ atomic_read_unchecked(&fscache_n_marks),
61374+ atomic_read_unchecked(&fscache_n_uncaches));
61375
61376 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
61377 " oom=%u\n",
61378- atomic_read(&fscache_n_acquires),
61379- atomic_read(&fscache_n_acquires_null),
61380- atomic_read(&fscache_n_acquires_no_cache),
61381- atomic_read(&fscache_n_acquires_ok),
61382- atomic_read(&fscache_n_acquires_nobufs),
61383- atomic_read(&fscache_n_acquires_oom));
61384+ atomic_read_unchecked(&fscache_n_acquires),
61385+ atomic_read_unchecked(&fscache_n_acquires_null),
61386+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
61387+ atomic_read_unchecked(&fscache_n_acquires_ok),
61388+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
61389+ atomic_read_unchecked(&fscache_n_acquires_oom));
61390
61391 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
61392- atomic_read(&fscache_n_object_lookups),
61393- atomic_read(&fscache_n_object_lookups_negative),
61394- atomic_read(&fscache_n_object_lookups_positive),
61395- atomic_read(&fscache_n_object_created),
61396- atomic_read(&fscache_n_object_lookups_timed_out));
61397+ atomic_read_unchecked(&fscache_n_object_lookups),
61398+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
61399+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
61400+ atomic_read_unchecked(&fscache_n_object_created),
61401+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
61402
61403 seq_printf(m, "Invals : n=%u run=%u\n",
61404- atomic_read(&fscache_n_invalidates),
61405- atomic_read(&fscache_n_invalidates_run));
61406+ atomic_read_unchecked(&fscache_n_invalidates),
61407+ atomic_read_unchecked(&fscache_n_invalidates_run));
61408
61409 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
61410- atomic_read(&fscache_n_updates),
61411- atomic_read(&fscache_n_updates_null),
61412- atomic_read(&fscache_n_updates_run));
61413+ atomic_read_unchecked(&fscache_n_updates),
61414+ atomic_read_unchecked(&fscache_n_updates_null),
61415+ atomic_read_unchecked(&fscache_n_updates_run));
61416
61417 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
61418- atomic_read(&fscache_n_relinquishes),
61419- atomic_read(&fscache_n_relinquishes_null),
61420- atomic_read(&fscache_n_relinquishes_waitcrt),
61421- atomic_read(&fscache_n_relinquishes_retire));
61422+ atomic_read_unchecked(&fscache_n_relinquishes),
61423+ atomic_read_unchecked(&fscache_n_relinquishes_null),
61424+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
61425+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
61426
61427 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
61428- atomic_read(&fscache_n_attr_changed),
61429- atomic_read(&fscache_n_attr_changed_ok),
61430- atomic_read(&fscache_n_attr_changed_nobufs),
61431- atomic_read(&fscache_n_attr_changed_nomem),
61432- atomic_read(&fscache_n_attr_changed_calls));
61433+ atomic_read_unchecked(&fscache_n_attr_changed),
61434+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
61435+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
61436+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
61437+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
61438
61439 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
61440- atomic_read(&fscache_n_allocs),
61441- atomic_read(&fscache_n_allocs_ok),
61442- atomic_read(&fscache_n_allocs_wait),
61443- atomic_read(&fscache_n_allocs_nobufs),
61444- atomic_read(&fscache_n_allocs_intr));
61445+ atomic_read_unchecked(&fscache_n_allocs),
61446+ atomic_read_unchecked(&fscache_n_allocs_ok),
61447+ atomic_read_unchecked(&fscache_n_allocs_wait),
61448+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
61449+ atomic_read_unchecked(&fscache_n_allocs_intr));
61450 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
61451- atomic_read(&fscache_n_alloc_ops),
61452- atomic_read(&fscache_n_alloc_op_waits),
61453- atomic_read(&fscache_n_allocs_object_dead));
61454+ atomic_read_unchecked(&fscache_n_alloc_ops),
61455+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
61456+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
61457
61458 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
61459 " int=%u oom=%u\n",
61460- atomic_read(&fscache_n_retrievals),
61461- atomic_read(&fscache_n_retrievals_ok),
61462- atomic_read(&fscache_n_retrievals_wait),
61463- atomic_read(&fscache_n_retrievals_nodata),
61464- atomic_read(&fscache_n_retrievals_nobufs),
61465- atomic_read(&fscache_n_retrievals_intr),
61466- atomic_read(&fscache_n_retrievals_nomem));
61467+ atomic_read_unchecked(&fscache_n_retrievals),
61468+ atomic_read_unchecked(&fscache_n_retrievals_ok),
61469+ atomic_read_unchecked(&fscache_n_retrievals_wait),
61470+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
61471+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
61472+ atomic_read_unchecked(&fscache_n_retrievals_intr),
61473+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
61474 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
61475- atomic_read(&fscache_n_retrieval_ops),
61476- atomic_read(&fscache_n_retrieval_op_waits),
61477- atomic_read(&fscache_n_retrievals_object_dead));
61478+ atomic_read_unchecked(&fscache_n_retrieval_ops),
61479+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
61480+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
61481
61482 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
61483- atomic_read(&fscache_n_stores),
61484- atomic_read(&fscache_n_stores_ok),
61485- atomic_read(&fscache_n_stores_again),
61486- atomic_read(&fscache_n_stores_nobufs),
61487- atomic_read(&fscache_n_stores_oom));
61488+ atomic_read_unchecked(&fscache_n_stores),
61489+ atomic_read_unchecked(&fscache_n_stores_ok),
61490+ atomic_read_unchecked(&fscache_n_stores_again),
61491+ atomic_read_unchecked(&fscache_n_stores_nobufs),
61492+ atomic_read_unchecked(&fscache_n_stores_oom));
61493 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
61494- atomic_read(&fscache_n_store_ops),
61495- atomic_read(&fscache_n_store_calls),
61496- atomic_read(&fscache_n_store_pages),
61497- atomic_read(&fscache_n_store_radix_deletes),
61498- atomic_read(&fscache_n_store_pages_over_limit));
61499+ atomic_read_unchecked(&fscache_n_store_ops),
61500+ atomic_read_unchecked(&fscache_n_store_calls),
61501+ atomic_read_unchecked(&fscache_n_store_pages),
61502+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
61503+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
61504
61505 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
61506- atomic_read(&fscache_n_store_vmscan_not_storing),
61507- atomic_read(&fscache_n_store_vmscan_gone),
61508- atomic_read(&fscache_n_store_vmscan_busy),
61509- atomic_read(&fscache_n_store_vmscan_cancelled),
61510- atomic_read(&fscache_n_store_vmscan_wait));
61511+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
61512+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
61513+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
61514+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
61515+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
61516
61517 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
61518- atomic_read(&fscache_n_op_pend),
61519- atomic_read(&fscache_n_op_run),
61520- atomic_read(&fscache_n_op_enqueue),
61521- atomic_read(&fscache_n_op_cancelled),
61522- atomic_read(&fscache_n_op_rejected));
61523+ atomic_read_unchecked(&fscache_n_op_pend),
61524+ atomic_read_unchecked(&fscache_n_op_run),
61525+ atomic_read_unchecked(&fscache_n_op_enqueue),
61526+ atomic_read_unchecked(&fscache_n_op_cancelled),
61527+ atomic_read_unchecked(&fscache_n_op_rejected));
61528 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
61529- atomic_read(&fscache_n_op_deferred_release),
61530- atomic_read(&fscache_n_op_release),
61531- atomic_read(&fscache_n_op_gc));
61532+ atomic_read_unchecked(&fscache_n_op_deferred_release),
61533+ atomic_read_unchecked(&fscache_n_op_release),
61534+ atomic_read_unchecked(&fscache_n_op_gc));
61535
61536 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
61537 atomic_read(&fscache_n_cop_alloc_object),
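[annotation] The fscache statistics conversion above is the canonical atomic_unchecked_t pattern that recurs throughout this patch: under PaX's REFCOUNT hardening, ordinary atomic_t operations trap on overflow to stop reference-count wraps, so counters that are purely statistical and may wrap harmlessly are moved to _unchecked variants with the old non-trapping semantics. A minimal sketch of the split, assuming the usual fallback when CONFIG_PAX_REFCOUNT is off (the x86 asm is illustrative only):

#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile const int *)&v->counter);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* deliberately no overflow check: statistics may wrap */
	asm volatile("lock incl %0" : "+m" (v->counter));
}
#else
/* hardening off: the unchecked ops collapse to the normal ones */
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_inc_return_unchecked(v)	atomic_inc_return(v)
#endif

Note that the CacheOp counters at the end of the hunk stay plain atomic_t: those track live operation counts rather than monotonically growing statistics, so an overflow there would indicate a real imbalance worth trapping on.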
61538diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
61539index b96a49b..9bfdc47 100644
61540--- a/fs/fuse/cuse.c
61541+++ b/fs/fuse/cuse.c
61542@@ -606,10 +606,12 @@ static int __init cuse_init(void)
61543 INIT_LIST_HEAD(&cuse_conntbl[i]);
61544
61545 /* inherit and extend fuse_dev_operations */
61546- cuse_channel_fops = fuse_dev_operations;
61547- cuse_channel_fops.owner = THIS_MODULE;
61548- cuse_channel_fops.open = cuse_channel_open;
61549- cuse_channel_fops.release = cuse_channel_release;
61550+ pax_open_kernel();
61551+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
61552+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
61553+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
61554+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
61555+ pax_close_kernel();
61556
61557 cuse_class = class_create(THIS_MODULE, "cuse");
61558 if (IS_ERR(cuse_class))
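[annotation] cuse previously built its file_operations by plain struct assignment; with this patch's constification the ops structure is read-only, so the template is memcpy()'d and the three fields are poked through pax_open_kernel()/pax_close_kernel(). A sketch of what those helpers are assumed to do on x86 (briefly clear CR0.WP so stores to read-only pages succeed):

static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0() & ~X86_CR0_WP;	/* drop write protection */
	write_cr0(cr0);
	barrier();
	return cr0;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0;

	barrier();
	cr0 = read_cr0() | X86_CR0_WP;	/* re-arm write protection */
	write_cr0(cr0);
	preempt_enable();
	return cr0;
}

The same open/patch/close sequence shows up again below in __register_nls() and the two nls codepage modules.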
61559diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
61560index 0a648bb..8d463f1 100644
61561--- a/fs/fuse/dev.c
61562+++ b/fs/fuse/dev.c
61563@@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
61564 ret = 0;
61565 pipe_lock(pipe);
61566
61567- if (!pipe->readers) {
61568+ if (!atomic_read(&pipe->readers)) {
61569 send_sig(SIGPIPE, current, 0);
61570 if (!ret)
61571 ret = -EPIPE;
61572@@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
61573 page_nr++;
61574 ret += buf->len;
61575
61576- if (pipe->files)
61577+ if (atomic_read(&pipe->files))
61578 do_wakeup = 1;
61579 }
61580
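[annotation] The pipe->readers and pipe->files tests reflect a patch-wide conversion of struct pipe_inode_info's counters from plain ints to atomic_t (the fs/pipe.c hunks at the end of this section carry the other half). Assumed shape of the hardened struct:

struct pipe_inode_info {
	struct mutex mutex;
	wait_queue_head_t wait;
	unsigned int nrbufs, curbuf, buffers;
	atomic_t readers;	/* was: unsigned int readers */
	atomic_t writers;	/* was: unsigned int writers */
	atomic_t files;		/* was: unsigned int files   */
	/* remaining members unchanged */
};

Every open-coded read of these fields then has to go through atomic_read(), which is what the two fuse hunks above do.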
61581diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
61582index 1d1292c..bba17ea 100644
61583--- a/fs/fuse/dir.c
61584+++ b/fs/fuse/dir.c
61585@@ -1418,7 +1418,7 @@ static char *read_link(struct dentry *dentry)
61586 return link;
61587 }
61588
61589-static void free_link(char *link)
61590+static void free_link(const char *link)
61591 {
61592 if (!IS_ERR(link))
61593 free_page((unsigned long) link);
61594diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
61595index fe649d3..c679164 100644
61596--- a/fs/hostfs/hostfs_kern.c
61597+++ b/fs/hostfs/hostfs_kern.c
61598@@ -898,7 +898,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
61599
61600 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
61601 {
61602- char *s = nd_get_link(nd);
61603+ const char *s = nd_get_link(nd);
61604 if (!IS_ERR(s))
61605 __putname(s);
61606 }
61607diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
61608index d19b30a..ef89c36 100644
61609--- a/fs/hugetlbfs/inode.c
61610+++ b/fs/hugetlbfs/inode.c
61611@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
61612 struct mm_struct *mm = current->mm;
61613 struct vm_area_struct *vma;
61614 struct hstate *h = hstate_file(file);
61615+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
61616 struct vm_unmapped_area_info info;
61617
61618 if (len & ~huge_page_mask(h))
61619@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
61620 return addr;
61621 }
61622
61623+#ifdef CONFIG_PAX_RANDMMAP
61624+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
61625+#endif
61626+
61627 if (addr) {
61628 addr = ALIGN(addr, huge_page_size(h));
61629 vma = find_vma(mm, addr);
61630- if (TASK_SIZE - len >= addr &&
61631- (!vma || addr + len <= vma->vm_start))
61632+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
61633 return addr;
61634 }
61635
61636 info.flags = 0;
61637 info.length = len;
61638 info.low_limit = TASK_UNMAPPED_BASE;
61639+
61640+#ifdef CONFIG_PAX_RANDMMAP
61641+ if (mm->pax_flags & MF_PAX_RANDMMAP)
61642+ info.low_limit += mm->delta_mmap;
61643+#endif
61644+
61645 info.high_limit = TASK_SIZE;
61646 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
61647 info.align_offset = 0;
61648@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
61649 };
61650 MODULE_ALIAS_FS("hugetlbfs");
61651
61652-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
61653+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
61654
61655 static int can_do_hugetlb_shm(void)
61656 {
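[annotation] hugetlb_get_unmapped_area gets the same RANDMMAP treatment applied to the other get_unmapped_area implementations in this patch: the search base is shifted by the per-mm random delta, the caller's address hint is ignored when randomization is active, and the bare "addr + len <= vma->vm_start" test becomes a gap check. The helper's real body is defined elsewhere in the patch; this sketch of its intent is an assumption, reading "offset" as randomized slack to keep between the mapping and an adjacent stack-like VMA:

static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len,
				 unsigned long offset)
{
	if (!vma)
		return true;			/* nothing mapped above us */
	if (addr + len > vma->vm_start)
		return false;			/* overlaps the next VMA */
	/* keep slack between this mapping and a growing stack */
	if ((vma->vm_flags & VM_GROWSDOWN) &&
	    vma->vm_start - (addr + len) < offset)
		return false;
	return true;
}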
61657diff --git a/fs/inode.c b/fs/inode.c
61658index 4bcdad3..1883822 100644
61659--- a/fs/inode.c
61660+++ b/fs/inode.c
61661@@ -841,8 +841,8 @@ unsigned int get_next_ino(void)
61662
61663 #ifdef CONFIG_SMP
61664 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
61665- static atomic_t shared_last_ino;
61666- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
61667+ static atomic_unchecked_t shared_last_ino;
61668+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
61669
61670 res = next - LAST_INO_BATCH;
61671 }
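[annotation] get_next_ino's shared counter is another wrap-tolerant case: these inode numbers are only uniqueness hints and may recycle, so the overflow trap is opted out of. For context, a sketch of the batching scheme the hunk sits in, assuming LAST_INO_BATCH is 1024 as in mainline:

#include <linux/percpu.h>

#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH - 1)) == 0)) {
		static atomic_unchecked_t shared_last_ino;
		int next = atomic_add_return_unchecked(LAST_INO_BATCH,
						       &shared_last_ino);

		res = next - LAST_INO_BATCH;	/* start of this CPU's block */
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}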
61672diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
61673index 4a6cf28..d3a29d3 100644
61674--- a/fs/jffs2/erase.c
61675+++ b/fs/jffs2/erase.c
61676@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
61677 struct jffs2_unknown_node marker = {
61678 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
61679 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
61680- .totlen = cpu_to_je32(c->cleanmarker_size)
61681+ .totlen = cpu_to_je32(c->cleanmarker_size),
61682+ .hdr_crc = cpu_to_je32(0)
61683 };
61684
61685 jffs2_prealloc_raw_node_refs(c, jeb, 1);
61686diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
61687index a6597d6..41b30ec 100644
61688--- a/fs/jffs2/wbuf.c
61689+++ b/fs/jffs2/wbuf.c
61690@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
61691 {
61692 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
61693 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
61694- .totlen = constant_cpu_to_je32(8)
61695+ .totlen = constant_cpu_to_je32(8),
61696+ .hdr_crc = constant_cpu_to_je32(0)
61697 };
61698
61699 /*
61700diff --git a/fs/jfs/super.c b/fs/jfs/super.c
61701index e2b7483..855bca3 100644
61702--- a/fs/jfs/super.c
61703+++ b/fs/jfs/super.c
61704@@ -884,7 +884,7 @@ static int __init init_jfs_fs(void)
61705
61706 jfs_inode_cachep =
61707 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
61708- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
61709+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
61710 init_once);
61711 if (jfs_inode_cachep == NULL)
61712 return -ENOMEM;
61713diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
61714index 39c0143..d54fad4 100644
61715--- a/fs/kernfs/dir.c
61716+++ b/fs/kernfs/dir.c
61717@@ -28,7 +28,7 @@ DEFINE_MUTEX(kernfs_mutex);
61718 *
61719 * Returns 31 bit hash of ns + name (so it fits in an off_t )
61720 */
61721-static unsigned int kernfs_name_hash(const char *name, const void *ns)
61722+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
61723 {
61724 unsigned long hash = init_name_hash();
61725 unsigned int len = strlen(name);
61726diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
61727index d29640b..32d2b6b 100644
61728--- a/fs/kernfs/file.c
61729+++ b/fs/kernfs/file.c
61730@@ -33,7 +33,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
61731
61732 struct kernfs_open_node {
61733 atomic_t refcnt;
61734- atomic_t event;
61735+ atomic_unchecked_t event;
61736 wait_queue_head_t poll;
61737 struct list_head files; /* goes through kernfs_open_file.list */
61738 };
61739@@ -149,7 +149,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
61740 {
61741 struct kernfs_open_file *of = sf->private;
61742
61743- of->event = atomic_read(&of->kn->attr.open->event);
61744+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
61745
61746 return of->kn->attr.ops->seq_show(sf, v);
61747 }
61748@@ -353,12 +353,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
61749 return ret;
61750 }
61751
61752-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
61753- void *buf, int len, int write)
61754+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
61755+ void *buf, size_t len, int write)
61756 {
61757 struct file *file = vma->vm_file;
61758 struct kernfs_open_file *of = kernfs_of(file);
61759- int ret;
61760+ ssize_t ret;
61761
61762 if (!of->vm_ops)
61763 return -EINVAL;
61764@@ -559,7 +559,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
61765 return -ENOMEM;
61766
61767 atomic_set(&new_on->refcnt, 0);
61768- atomic_set(&new_on->event, 1);
61769+ atomic_set_unchecked(&new_on->event, 1);
61770 init_waitqueue_head(&new_on->poll);
61771 INIT_LIST_HEAD(&new_on->files);
61772 goto retry;
61773@@ -756,7 +756,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
61774
61775 kernfs_put_active(kn);
61776
61777- if (of->event != atomic_read(&on->event))
61778+ if (of->event != atomic_read_unchecked(&on->event))
61779 goto trigger;
61780
61781 return DEFAULT_POLLMASK;
61782@@ -781,7 +781,7 @@ void kernfs_notify(struct kernfs_node *kn)
61783 if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
61784 on = kn->attr.open;
61785 if (on) {
61786- atomic_inc(&on->event);
61787+ atomic_inc_unchecked(&on->event);
61788 wake_up_interruptible(&on->poll);
61789 }
61790 }
61791diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
61792index 4d45705..b35e0bd 100644
61793--- a/fs/kernfs/symlink.c
61794+++ b/fs/kernfs/symlink.c
61795@@ -132,7 +132,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
61796 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
61797 void *cookie)
61798 {
61799- char *page = nd_get_link(nd);
61800+ const char *page = nd_get_link(nd);
61801 if (!IS_ERR(page))
61802 free_page((unsigned long)page);
61803 }
61804diff --git a/fs/libfs.c b/fs/libfs.c
61805index a184424..944ddce 100644
61806--- a/fs/libfs.c
61807+++ b/fs/libfs.c
61808@@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
61809
61810 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
61811 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
61812+ char d_name[sizeof(next->d_iname)];
61813+ const unsigned char *name;
61814+
61815 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
61816 if (!simple_positive(next)) {
61817 spin_unlock(&next->d_lock);
61818@@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
61819
61820 spin_unlock(&next->d_lock);
61821 spin_unlock(&dentry->d_lock);
61822- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
61823+ name = next->d_name.name;
61824+ if (name == next->d_iname) {
61825+ memcpy(d_name, name, next->d_name.len);
61826+ name = d_name;
61827+ }
61828+ if (!dir_emit(ctx, name, next->d_name.len,
61829 next->d_inode->i_ino, dt_type(next->d_inode)))
61830 return 0;
61831 spin_lock(&dentry->d_lock);
61832@@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync);
61833 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
61834 void *cookie)
61835 {
61836- char *s = nd_get_link(nd);
61837+ const char *s = nd_get_link(nd);
61838 if (!IS_ERR(s))
61839 kfree(s);
61840 }
61841diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
61842index acd3947..1f896e2 100644
61843--- a/fs/lockd/clntproc.c
61844+++ b/fs/lockd/clntproc.c
61845@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
61846 /*
61847 * Cookie counter for NLM requests
61848 */
61849-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
61850+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
61851
61852 void nlmclnt_next_cookie(struct nlm_cookie *c)
61853 {
61854- u32 cookie = atomic_inc_return(&nlm_cookie);
61855+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
61856
61857 memcpy(c->data, &cookie, 4);
61858 c->len=4;
61859diff --git a/fs/locks.c b/fs/locks.c
61860index 4dd39b9..12d6aaf 100644
61861--- a/fs/locks.c
61862+++ b/fs/locks.c
61863@@ -2218,16 +2218,16 @@ void locks_remove_flock(struct file *filp)
61864 return;
61865
61866 if (filp->f_op->flock) {
61867- struct file_lock fl = {
61868+ struct file_lock flock = {
61869 .fl_pid = current->tgid,
61870 .fl_file = filp,
61871 .fl_flags = FL_FLOCK,
61872 .fl_type = F_UNLCK,
61873 .fl_end = OFFSET_MAX,
61874 };
61875- filp->f_op->flock(filp, F_SETLKW, &fl);
61876- if (fl.fl_ops && fl.fl_ops->fl_release_private)
61877- fl.fl_ops->fl_release_private(&fl);
61878+ filp->f_op->flock(filp, F_SETLKW, &flock);
61879+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
61880+ flock.fl_ops->fl_release_private(&flock);
61881 }
61882
61883 spin_lock(&inode->i_lock);
61884diff --git a/fs/mount.h b/fs/mount.h
61885index b29e42f..5ea7fdf 100644
61886--- a/fs/mount.h
61887+++ b/fs/mount.h
61888@@ -11,7 +11,7 @@ struct mnt_namespace {
61889 u64 seq; /* Sequence number to prevent loops */
61890 wait_queue_head_t poll;
61891 int event;
61892-};
61893+} __randomize_layout;
61894
61895 struct mnt_pcp {
61896 int mnt_count;
61897@@ -57,7 +57,7 @@ struct mount {
61898 int mnt_expiry_mark; /* true if marked for expiry */
61899 int mnt_pinned;
61900 struct path mnt_ex_mountpoint;
61901-};
61902+} __randomize_layout;
61903
61904 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
61905
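[annotation] The __randomize_layout tags on struct mnt_namespace and struct mount mark them for GRKERNSEC_RANDSTRUCT, a gcc plugin that shuffles member order at compile time (seeded per build) so exploits cannot hardcode field offsets. Assumed expansion of the annotation:

#ifdef RANDSTRUCT_PLUGIN
#define __randomize_layout	__attribute__((randomize_layout))
#else
#define __randomize_layout
#endif

struct example {
	int	a;
	void	*b;
	char	name[16];
} __randomize_layout;	/* member offsets differ per build seed */

Randomized structs must only ever be initialized with designated initializers, since positional initializers would bind to the wrong members once the layout is shuffled.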
61906diff --git a/fs/namei.c b/fs/namei.c
61907index 4a3c105..0d718f4 100644
61908--- a/fs/namei.c
61909+++ b/fs/namei.c
61910@@ -330,16 +330,32 @@ int generic_permission(struct inode *inode, int mask)
61911 if (ret != -EACCES)
61912 return ret;
61913
61914+#ifdef CONFIG_GRKERNSEC
61915+ /* we'll block if we have to log due to a denied capability use */
61916+ if (mask & MAY_NOT_BLOCK)
61917+ return -ECHILD;
61918+#endif
61919+
61920 if (S_ISDIR(inode->i_mode)) {
61921 /* DACs are overridable for directories */
61922- if (inode_capable(inode, CAP_DAC_OVERRIDE))
61923- return 0;
61924 if (!(mask & MAY_WRITE))
61925- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61926+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61927+ inode_capable(inode, CAP_DAC_READ_SEARCH))
61928 return 0;
61929+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
61930+ return 0;
61931 return -EACCES;
61932 }
61933 /*
61934+ * Searching includes executable on directories, else just read.
61935+ */
61936+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61937+ if (mask == MAY_READ)
61938+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61939+ inode_capable(inode, CAP_DAC_READ_SEARCH))
61940+ return 0;
61941+
61942+ /*
61943 * Read/write DACs are always overridable.
61944 * Executable DACs are overridable when there is
61945 * at least one exec bit set.
61946@@ -348,14 +364,6 @@ int generic_permission(struct inode *inode, int mask)
61947 if (inode_capable(inode, CAP_DAC_OVERRIDE))
61948 return 0;
61949
61950- /*
61951- * Searching includes executable on directories, else just read.
61952- */
61953- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61954- if (mask == MAY_READ)
61955- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61956- return 0;
61957-
61958 return -EACCES;
61959 }
61960
61961@@ -821,7 +829,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61962 {
61963 struct dentry *dentry = link->dentry;
61964 int error;
61965- char *s;
61966+ const char *s;
61967
61968 BUG_ON(nd->flags & LOOKUP_RCU);
61969
61970@@ -842,6 +850,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61971 if (error)
61972 goto out_put_nd_path;
61973
61974+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
61975+ dentry->d_inode, dentry, nd->path.mnt)) {
61976+ error = -EACCES;
61977+ goto out_put_nd_path;
61978+ }
61979+
61980 nd->last_type = LAST_BIND;
61981 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
61982 error = PTR_ERR(*p);
61983@@ -1590,6 +1604,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
61984 if (res)
61985 break;
61986 res = walk_component(nd, path, LOOKUP_FOLLOW);
61987+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
61988+ res = -EACCES;
61989 put_link(nd, &link, cookie);
61990 } while (res > 0);
61991
61992@@ -1663,7 +1679,7 @@ EXPORT_SYMBOL(full_name_hash);
61993 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
61994 {
61995 unsigned long a, b, adata, bdata, mask, hash, len;
61996- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61997+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61998
61999 hash = a = 0;
62000 len = -sizeof(unsigned long);
62001@@ -1947,6 +1963,8 @@ static int path_lookupat(int dfd, const char *name,
62002 if (err)
62003 break;
62004 err = lookup_last(nd, &path);
62005+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
62006+ err = -EACCES;
62007 put_link(nd, &link, cookie);
62008 }
62009 }
62010@@ -1954,6 +1972,13 @@ static int path_lookupat(int dfd, const char *name,
62011 if (!err)
62012 err = complete_walk(nd);
62013
62014+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
62015+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
62016+ path_put(&nd->path);
62017+ err = -ENOENT;
62018+ }
62019+ }
62020+
62021 if (!err && nd->flags & LOOKUP_DIRECTORY) {
62022 if (!d_is_directory(nd->path.dentry)) {
62023 path_put(&nd->path);
62024@@ -1981,8 +2006,15 @@ static int filename_lookup(int dfd, struct filename *name,
62025 retval = path_lookupat(dfd, name->name,
62026 flags | LOOKUP_REVAL, nd);
62027
62028- if (likely(!retval))
62029+ if (likely(!retval)) {
62030 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
62031+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
62032+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
62033+ path_put(&nd->path);
62034+ return -ENOENT;
62035+ }
62036+ }
62037+ }
62038 return retval;
62039 }
62040
62041@@ -2556,6 +2588,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
62042 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
62043 return -EPERM;
62044
62045+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
62046+ return -EPERM;
62047+ if (gr_handle_rawio(inode))
62048+ return -EPERM;
62049+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
62050+ return -EACCES;
62051+
62052 return 0;
62053 }
62054
62055@@ -2787,7 +2826,7 @@ looked_up:
62056 * cleared otherwise prior to returning.
62057 */
62058 static int lookup_open(struct nameidata *nd, struct path *path,
62059- struct file *file,
62060+ struct path *link, struct file *file,
62061 const struct open_flags *op,
62062 bool got_write, int *opened)
62063 {
62064@@ -2822,6 +2861,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
62065 /* Negative dentry, just create the file */
62066 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
62067 umode_t mode = op->mode;
62068+
62069+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
62070+ error = -EACCES;
62071+ goto out_dput;
62072+ }
62073+
62074+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
62075+ error = -EACCES;
62076+ goto out_dput;
62077+ }
62078+
62079 if (!IS_POSIXACL(dir->d_inode))
62080 mode &= ~current_umask();
62081 /*
62082@@ -2843,6 +2893,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
62083 nd->flags & LOOKUP_EXCL);
62084 if (error)
62085 goto out_dput;
62086+ else
62087+ gr_handle_create(dentry, nd->path.mnt);
62088 }
62089 out_no_open:
62090 path->dentry = dentry;
62091@@ -2857,7 +2909,7 @@ out_dput:
62092 /*
62093 * Handle the last step of open()
62094 */
62095-static int do_last(struct nameidata *nd, struct path *path,
62096+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
62097 struct file *file, const struct open_flags *op,
62098 int *opened, struct filename *name)
62099 {
62100@@ -2907,6 +2959,15 @@ static int do_last(struct nameidata *nd, struct path *path,
62101 if (error)
62102 return error;
62103
62104+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
62105+ error = -ENOENT;
62106+ goto out;
62107+ }
62108+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
62109+ error = -EACCES;
62110+ goto out;
62111+ }
62112+
62113 audit_inode(name, dir, LOOKUP_PARENT);
62114 error = -EISDIR;
62115 /* trailing slashes? */
62116@@ -2926,7 +2987,7 @@ retry_lookup:
62117 */
62118 }
62119 mutex_lock(&dir->d_inode->i_mutex);
62120- error = lookup_open(nd, path, file, op, got_write, opened);
62121+ error = lookup_open(nd, path, link, file, op, got_write, opened);
62122 mutex_unlock(&dir->d_inode->i_mutex);
62123
62124 if (error <= 0) {
62125@@ -2950,11 +3011,28 @@ retry_lookup:
62126 goto finish_open_created;
62127 }
62128
62129+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
62130+ error = -ENOENT;
62131+ goto exit_dput;
62132+ }
62133+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
62134+ error = -EACCES;
62135+ goto exit_dput;
62136+ }
62137+
62138 /*
62139 * create/update audit record if it already exists.
62140 */
62141- if (d_is_positive(path->dentry))
62142+ if (d_is_positive(path->dentry)) {
62143+ /* only check if O_CREAT is specified, all other checks need to go
62144+ into may_open */
62145+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
62146+ error = -EACCES;
62147+ goto exit_dput;
62148+ }
62149+
62150 audit_inode(name, path->dentry, 0);
62151+ }
62152
62153 /*
62154 * If atomic_open() acquired write access it is dropped now due to
62155@@ -2995,6 +3073,11 @@ finish_lookup:
62156 }
62157 }
62158 BUG_ON(inode != path->dentry->d_inode);
62159+ /* if we're resolving a symlink to another symlink */
62160+ if (link && gr_handle_symlink_owner(link, inode)) {
62161+ error = -EACCES;
62162+ goto out;
62163+ }
62164 return 1;
62165 }
62166
62167@@ -3004,7 +3087,6 @@ finish_lookup:
62168 save_parent.dentry = nd->path.dentry;
62169 save_parent.mnt = mntget(path->mnt);
62170 nd->path.dentry = path->dentry;
62171-
62172 }
62173 nd->inode = inode;
62174 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
62175@@ -3014,7 +3096,18 @@ finish_open:
62176 path_put(&save_parent);
62177 return error;
62178 }
62179+
62180+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
62181+ error = -ENOENT;
62182+ goto out;
62183+ }
62184+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
62185+ error = -EACCES;
62186+ goto out;
62187+ }
62188+
62189 audit_inode(name, nd->path.dentry, 0);
62190+
62191 error = -EISDIR;
62192 if ((open_flag & O_CREAT) &&
62193 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
62194@@ -3178,7 +3271,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
62195 if (unlikely(error))
62196 goto out;
62197
62198- error = do_last(nd, &path, file, op, &opened, pathname);
62199+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
62200 while (unlikely(error > 0)) { /* trailing symlink */
62201 struct path link = path;
62202 void *cookie;
62203@@ -3196,7 +3289,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
62204 error = follow_link(&link, nd, &cookie);
62205 if (unlikely(error))
62206 break;
62207- error = do_last(nd, &path, file, op, &opened, pathname);
62208+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
62209 put_link(nd, &link, cookie);
62210 }
62211 out:
62212@@ -3296,9 +3389,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
62213 goto unlock;
62214
62215 error = -EEXIST;
62216- if (d_is_positive(dentry))
62217+ if (d_is_positive(dentry)) {
62218+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
62219+ error = -ENOENT;
62220 goto fail;
62221-
62222+ }
62223 /*
62224 * Special case - lookup gave negative, but... we had foo/bar/
62225 * From the vfs_mknod() POV we just have a negative dentry -
62226@@ -3350,6 +3445,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
62227 }
62228 EXPORT_SYMBOL(user_path_create);
62229
62230+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
62231+{
62232+ struct filename *tmp = getname(pathname);
62233+ struct dentry *res;
62234+ if (IS_ERR(tmp))
62235+ return ERR_CAST(tmp);
62236+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
62237+ if (IS_ERR(res))
62238+ putname(tmp);
62239+ else
62240+ *to = tmp;
62241+ return res;
62242+}
62243+
62244 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
62245 {
62246 int error = may_create(dir, dentry);
62247@@ -3412,6 +3521,17 @@ retry:
62248
62249 if (!IS_POSIXACL(path.dentry->d_inode))
62250 mode &= ~current_umask();
62251+
62252+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
62253+ error = -EPERM;
62254+ goto out;
62255+ }
62256+
62257+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
62258+ error = -EACCES;
62259+ goto out;
62260+ }
62261+
62262 error = security_path_mknod(&path, dentry, mode, dev);
62263 if (error)
62264 goto out;
62265@@ -3428,6 +3548,8 @@ retry:
62266 break;
62267 }
62268 out:
62269+ if (!error)
62270+ gr_handle_create(dentry, path.mnt);
62271 done_path_create(&path, dentry);
62272 if (retry_estale(error, lookup_flags)) {
62273 lookup_flags |= LOOKUP_REVAL;
62274@@ -3480,9 +3602,16 @@ retry:
62275
62276 if (!IS_POSIXACL(path.dentry->d_inode))
62277 mode &= ~current_umask();
62278+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
62279+ error = -EACCES;
62280+ goto out;
62281+ }
62282 error = security_path_mkdir(&path, dentry, mode);
62283 if (!error)
62284 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
62285+ if (!error)
62286+ gr_handle_create(dentry, path.mnt);
62287+out:
62288 done_path_create(&path, dentry);
62289 if (retry_estale(error, lookup_flags)) {
62290 lookup_flags |= LOOKUP_REVAL;
62291@@ -3563,6 +3692,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
62292 struct filename *name;
62293 struct dentry *dentry;
62294 struct nameidata nd;
62295+ ino_t saved_ino = 0;
62296+ dev_t saved_dev = 0;
62297 unsigned int lookup_flags = 0;
62298 retry:
62299 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
62300@@ -3595,10 +3726,21 @@ retry:
62301 error = -ENOENT;
62302 goto exit3;
62303 }
62304+
62305+ saved_ino = dentry->d_inode->i_ino;
62306+ saved_dev = gr_get_dev_from_dentry(dentry);
62307+
62308+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
62309+ error = -EACCES;
62310+ goto exit3;
62311+ }
62312+
62313 error = security_path_rmdir(&nd.path, dentry);
62314 if (error)
62315 goto exit3;
62316 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
62317+ if (!error && (saved_dev || saved_ino))
62318+ gr_handle_delete(saved_ino, saved_dev);
62319 exit3:
62320 dput(dentry);
62321 exit2:
62322@@ -3688,6 +3830,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
62323 struct nameidata nd;
62324 struct inode *inode = NULL;
62325 struct inode *delegated_inode = NULL;
62326+ ino_t saved_ino = 0;
62327+ dev_t saved_dev = 0;
62328 unsigned int lookup_flags = 0;
62329 retry:
62330 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
62331@@ -3714,10 +3858,22 @@ retry_deleg:
62332 if (d_is_negative(dentry))
62333 goto slashes;
62334 ihold(inode);
62335+
62336+ if (inode->i_nlink <= 1) {
62337+ saved_ino = inode->i_ino;
62338+ saved_dev = gr_get_dev_from_dentry(dentry);
62339+ }
62340+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
62341+ error = -EACCES;
62342+ goto exit2;
62343+ }
62344+
62345 error = security_path_unlink(&nd.path, dentry);
62346 if (error)
62347 goto exit2;
62348 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
62349+ if (!error && (saved_ino || saved_dev))
62350+ gr_handle_delete(saved_ino, saved_dev);
62351 exit2:
62352 dput(dentry);
62353 }
62354@@ -3805,9 +3961,17 @@ retry:
62355 if (IS_ERR(dentry))
62356 goto out_putname;
62357
62358+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
62359+ error = -EACCES;
62360+ goto out;
62361+ }
62362+
62363 error = security_path_symlink(&path, dentry, from->name);
62364 if (!error)
62365 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
62366+ if (!error)
62367+ gr_handle_create(dentry, path.mnt);
62368+out:
62369 done_path_create(&path, dentry);
62370 if (retry_estale(error, lookup_flags)) {
62371 lookup_flags |= LOOKUP_REVAL;
62372@@ -3910,6 +4074,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
62373 struct dentry *new_dentry;
62374 struct path old_path, new_path;
62375 struct inode *delegated_inode = NULL;
62376+ struct filename *to = NULL;
62377 int how = 0;
62378 int error;
62379
62380@@ -3933,7 +4098,7 @@ retry:
62381 if (error)
62382 return error;
62383
62384- new_dentry = user_path_create(newdfd, newname, &new_path,
62385+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
62386 (how & LOOKUP_REVAL));
62387 error = PTR_ERR(new_dentry);
62388 if (IS_ERR(new_dentry))
62389@@ -3945,11 +4110,28 @@ retry:
62390 error = may_linkat(&old_path);
62391 if (unlikely(error))
62392 goto out_dput;
62393+
62394+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
62395+ old_path.dentry->d_inode,
62396+ old_path.dentry->d_inode->i_mode, to)) {
62397+ error = -EACCES;
62398+ goto out_dput;
62399+ }
62400+
62401+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
62402+ old_path.dentry, old_path.mnt, to)) {
62403+ error = -EACCES;
62404+ goto out_dput;
62405+ }
62406+
62407 error = security_path_link(old_path.dentry, &new_path, new_dentry);
62408 if (error)
62409 goto out_dput;
62410 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
62411+ if (!error)
62412+ gr_handle_create(new_dentry, new_path.mnt);
62413 out_dput:
62414+ putname(to);
62415 done_path_create(&new_path, new_dentry);
62416 if (delegated_inode) {
62417 error = break_deleg_wait(&delegated_inode);
62418@@ -4236,6 +4418,12 @@ retry_deleg:
62419 if (new_dentry == trap)
62420 goto exit5;
62421
62422+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
62423+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
62424+ to);
62425+ if (error)
62426+ goto exit5;
62427+
62428 error = security_path_rename(&oldnd.path, old_dentry,
62429 &newnd.path, new_dentry);
62430 if (error)
62431@@ -4243,6 +4431,9 @@ retry_deleg:
62432 error = vfs_rename(old_dir->d_inode, old_dentry,
62433 new_dir->d_inode, new_dentry,
62434 &delegated_inode);
62435+ if (!error)
62436+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
62437+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
62438 exit5:
62439 dput(new_dentry);
62440 exit4:
62441@@ -4279,6 +4470,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
62442
62443 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
62444 {
62445+ char tmpbuf[64];
62446+ const char *newlink;
62447 int len;
62448
62449 len = PTR_ERR(link);
62450@@ -4288,7 +4481,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
62451 len = strlen(link);
62452 if (len > (unsigned) buflen)
62453 len = buflen;
62454- if (copy_to_user(buffer, link, len))
62455+
62456+ if (len < sizeof(tmpbuf)) {
62457+ memcpy(tmpbuf, link, len);
62458+ newlink = tmpbuf;
62459+ } else
62460+ newlink = link;
62461+
62462+ if (copy_to_user(buffer, newlink, len))
62463 len = -EFAULT;
62464 out:
62465 return len;
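[annotation] The vfs_readlink hunk closing this file stages link bodies shorter than 64 bytes through a stack buffer before copy_to_user(). A plausible reading, not stated in the patch itself: short symlink targets can live inline inside inode/dentry storage, and bouncing them via the stack keeps the user copy from reading directly out of a heap object that usercopy hardening would otherwise flag. The general pattern as a sketch (helper name is illustrative):

#include <linux/uaccess.h>
#include <linux/string.h>

static ssize_t copy_small_string_to_user(char __user *ubuf,
					 const char *kstr, size_t len)
{
	char tmp[64];
	const char *src = kstr;

	if (len < sizeof(tmp)) {
		memcpy(tmp, kstr, len);	/* bounce via the stack */
		src = tmp;
	}
	return copy_to_user(ubuf, src, len) ? -EFAULT : (ssize_t)len;
}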
62466diff --git a/fs/namespace.c b/fs/namespace.c
62467index 65233a5..82ac953 100644
62468--- a/fs/namespace.c
62469+++ b/fs/namespace.c
62470@@ -1339,6 +1339,9 @@ static int do_umount(struct mount *mnt, int flags)
62471 if (!(sb->s_flags & MS_RDONLY))
62472 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
62473 up_write(&sb->s_umount);
62474+
62475+ gr_log_remount(mnt->mnt_devname, retval);
62476+
62477 return retval;
62478 }
62479
62480@@ -1361,6 +1364,9 @@ static int do_umount(struct mount *mnt, int flags)
62481 }
62482 unlock_mount_hash();
62483 namespace_unlock();
62484+
62485+ gr_log_unmount(mnt->mnt_devname, retval);
62486+
62487 return retval;
62488 }
62489
62490@@ -1380,7 +1386,7 @@ static inline bool may_mount(void)
62491 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
62492 */
62493
62494-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
62495+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
62496 {
62497 struct path path;
62498 struct mount *mnt;
62499@@ -1422,7 +1428,7 @@ out:
62500 /*
62501 * The 2.0 compatible umount. No flags.
62502 */
62503-SYSCALL_DEFINE1(oldumount, char __user *, name)
62504+SYSCALL_DEFINE1(oldumount, const char __user *, name)
62505 {
62506 return sys_umount(name, 0);
62507 }
62508@@ -2431,6 +2437,16 @@ long do_mount(const char *dev_name, const char *dir_name,
62509 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
62510 MS_STRICTATIME);
62511
62512+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
62513+ retval = -EPERM;
62514+ goto dput_out;
62515+ }
62516+
62517+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
62518+ retval = -EPERM;
62519+ goto dput_out;
62520+ }
62521+
62522 if (flags & MS_REMOUNT)
62523 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
62524 data_page);
62525@@ -2445,6 +2461,9 @@ long do_mount(const char *dev_name, const char *dir_name,
62526 dev_name, data_page);
62527 dput_out:
62528 path_put(&path);
62529+
62530+ gr_log_mount(dev_name, dir_name, retval);
62531+
62532 return retval;
62533 }
62534
62535@@ -2462,7 +2481,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
62536 * number incrementing at 10Ghz will take 12,427 years to wrap which
62537 * is effectively never, so we can ignore the possibility.
62538 */
62539-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
62540+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
62541
62542 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
62543 {
62544@@ -2477,7 +2496,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
62545 kfree(new_ns);
62546 return ERR_PTR(ret);
62547 }
62548- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
62549+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
62550 atomic_set(&new_ns->count, 1);
62551 new_ns->root = NULL;
62552 INIT_LIST_HEAD(&new_ns->list);
62553@@ -2487,7 +2506,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
62554 return new_ns;
62555 }
62556
62557-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
62558+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
62559 struct user_namespace *user_ns, struct fs_struct *new_fs)
62560 {
62561 struct mnt_namespace *new_ns;
62562@@ -2608,8 +2627,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
62563 }
62564 EXPORT_SYMBOL(mount_subtree);
62565
62566-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
62567- char __user *, type, unsigned long, flags, void __user *, data)
62568+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
62569+ const char __user *, type, unsigned long, flags, void __user *, data)
62570 {
62571 int ret;
62572 char *kernel_type;
62573@@ -2722,6 +2741,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
62574 if (error)
62575 goto out2;
62576
62577+ if (gr_handle_chroot_pivot()) {
62578+ error = -EPERM;
62579+ goto out2;
62580+ }
62581+
62582 get_fs_root(current->fs, &root);
62583 old_mp = lock_mount(&old);
62584 error = PTR_ERR(old_mp);
62585@@ -2990,7 +3014,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
62586 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
62587 return -EPERM;
62588
62589- if (fs->users != 1)
62590+ if (atomic_read(&fs->users) != 1)
62591 return -EINVAL;
62592
62593 get_mnt_ns(mnt_ns);
62594diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
62595index f4ccfe6..a5cf064 100644
62596--- a/fs/nfs/callback_xdr.c
62597+++ b/fs/nfs/callback_xdr.c
62598@@ -51,7 +51,7 @@ struct callback_op {
62599 callback_decode_arg_t decode_args;
62600 callback_encode_res_t encode_res;
62601 long res_maxsize;
62602-};
62603+} __do_const;
62604
62605 static struct callback_op callback_ops[];
62606
62607diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
62608index 360114a..ac6e265 100644
62609--- a/fs/nfs/inode.c
62610+++ b/fs/nfs/inode.c
62611@@ -1189,16 +1189,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
62612 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
62613 }
62614
62615-static atomic_long_t nfs_attr_generation_counter;
62616+static atomic_long_unchecked_t nfs_attr_generation_counter;
62617
62618 static unsigned long nfs_read_attr_generation_counter(void)
62619 {
62620- return atomic_long_read(&nfs_attr_generation_counter);
62621+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
62622 }
62623
62624 unsigned long nfs_inc_attr_generation_counter(void)
62625 {
62626- return atomic_long_inc_return(&nfs_attr_generation_counter);
62627+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
62628 }
62629
62630 void nfs_fattr_init(struct nfs_fattr *fattr)
62631diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
62632index 9a914e8..e89c0ea 100644
62633--- a/fs/nfsd/nfs4proc.c
62634+++ b/fs/nfsd/nfs4proc.c
62635@@ -1178,7 +1178,7 @@ struct nfsd4_operation {
62636 nfsd4op_rsize op_rsize_bop;
62637 stateid_getter op_get_currentstateid;
62638 stateid_setter op_set_currentstateid;
62639-};
62640+} __do_const;
62641
62642 static struct nfsd4_operation nfsd4_ops[];
62643
62644diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
62645index 16e8fa7..b0803f6 100644
62646--- a/fs/nfsd/nfs4xdr.c
62647+++ b/fs/nfsd/nfs4xdr.c
62648@@ -1531,7 +1531,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
62649
62650 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
62651
62652-static nfsd4_dec nfsd4_dec_ops[] = {
62653+static const nfsd4_dec nfsd4_dec_ops[] = {
62654 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
62655 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
62656 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
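[annotation] struct callback_op above and struct nfsd4_operation here are tagged __do_const, and the nfsd4_dec_ops dispatch table gains an explicit const: ops tables are prime targets for runtime tampering, so the constify plugin forces them into .rodata. Structures made purely of function pointers are assumed to be constified automatically; these carry plain data members alongside the function pointers (e.g. res_maxsize), so they need the explicit tag. Assumed expansion:

#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#else
#define __do_const
#endif

struct example_op {
	int (*handler)(void *arg);
	long res_maxsize;	/* plain data member: no auto-constify */
} __do_const;		/* every instance is now treated as const */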
62657diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
62658index f8f060f..c4ba09a 100644
62659--- a/fs/nfsd/nfscache.c
62660+++ b/fs/nfsd/nfscache.c
62661@@ -519,14 +519,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
62662 {
62663 struct svc_cacherep *rp = rqstp->rq_cacherep;
62664 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
62665- int len;
62666+ long len;
62667 size_t bufsize = 0;
62668
62669 if (!rp)
62670 return;
62671
62672- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
62673- len >>= 2;
62674+ if (statp) {
62675+ len = (char*)statp - (char*)resv->iov_base;
62676+ len = resv->iov_len - len;
62677+ len >>= 2;
62678+ }
62679
62680 /* Don't cache excessive amounts of data and XDR failures */
62681 if (!statp || len > (256 >> 2)) {
62682diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
62683index eea5ad1..5a84ac7 100644
62684--- a/fs/nfsd/vfs.c
62685+++ b/fs/nfsd/vfs.c
62686@@ -843,7 +843,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
62687 } else {
62688 oldfs = get_fs();
62689 set_fs(KERNEL_DS);
62690- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
62691+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
62692 set_fs(oldfs);
62693 }
62694
62695@@ -934,7 +934,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
62696
62697 /* Write the data. */
62698 oldfs = get_fs(); set_fs(KERNEL_DS);
62699- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
62700+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
62701 set_fs(oldfs);
62702 if (host_err < 0)
62703 goto out_nfserr;
62704@@ -1479,7 +1479,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
62705 */
62706
62707 oldfs = get_fs(); set_fs(KERNEL_DS);
62708- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
62709+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
62710 set_fs(oldfs);
62711
62712 if (host_err < 0)
62713diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
62714index 52ccd34..43a53b1 100644
62715--- a/fs/nls/nls_base.c
62716+++ b/fs/nls/nls_base.c
62717@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
62718
62719 int __register_nls(struct nls_table *nls, struct module *owner)
62720 {
62721- struct nls_table ** tmp = &tables;
62722+ struct nls_table *tmp = tables;
62723
62724 if (nls->next)
62725 return -EBUSY;
62726
62727- nls->owner = owner;
62728+ pax_open_kernel();
62729+ *(void **)&nls->owner = owner;
62730+ pax_close_kernel();
62731 spin_lock(&nls_lock);
62732- while (*tmp) {
62733- if (nls == *tmp) {
62734+ while (tmp) {
62735+ if (nls == tmp) {
62736 spin_unlock(&nls_lock);
62737 return -EBUSY;
62738 }
62739- tmp = &(*tmp)->next;
62740+ tmp = tmp->next;
62741 }
62742- nls->next = tables;
62743+ pax_open_kernel();
62744+ *(struct nls_table **)&nls->next = tables;
62745+ pax_close_kernel();
62746 tables = nls;
62747 spin_unlock(&nls_lock);
62748 return 0;
62749@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
62750
62751 int unregister_nls(struct nls_table * nls)
62752 {
62753- struct nls_table ** tmp = &tables;
62754+ struct nls_table * const * tmp = &tables;
62755
62756 spin_lock(&nls_lock);
62757 while (*tmp) {
62758 if (nls == *tmp) {
62759- *tmp = nls->next;
62760+ pax_open_kernel();
62761+ *(struct nls_table **)tmp = nls->next;
62762+ pax_close_kernel();
62763 spin_unlock(&nls_lock);
62764 return 0;
62765 }
62766diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
62767index 162b3f1..6076a7c 100644
62768--- a/fs/nls/nls_euc-jp.c
62769+++ b/fs/nls/nls_euc-jp.c
62770@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
62771 p_nls = load_nls("cp932");
62772
62773 if (p_nls) {
62774- table.charset2upper = p_nls->charset2upper;
62775- table.charset2lower = p_nls->charset2lower;
62776+ pax_open_kernel();
62777+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
62778+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
62779+ pax_close_kernel();
62780 return register_nls(&table);
62781 }
62782
62783diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
62784index a80a741..7b96e1b 100644
62785--- a/fs/nls/nls_koi8-ru.c
62786+++ b/fs/nls/nls_koi8-ru.c
62787@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
62788 p_nls = load_nls("koi8-u");
62789
62790 if (p_nls) {
62791- table.charset2upper = p_nls->charset2upper;
62792- table.charset2lower = p_nls->charset2lower;
62793+ pax_open_kernel();
62794+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
62795+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
62796+ pax_close_kernel();
62797 return register_nls(&table);
62798 }
62799
62800diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
62801index 287a22c..4e56e4e 100644
62802--- a/fs/notify/fanotify/fanotify_user.c
62803+++ b/fs/notify/fanotify/fanotify_user.c
62804@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
62805
62806 fd = fanotify_event_metadata.fd;
62807 ret = -EFAULT;
62808- if (copy_to_user(buf, &fanotify_event_metadata,
62809- fanotify_event_metadata.event_len))
62810+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
62811+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
62812 goto out_close_fd;
62813
62814 ret = prepare_for_access_response(group, event, fd);
62815@@ -742,6 +742,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
62816 oevent->path.mnt = NULL;
62817 oevent->path.dentry = NULL;
62818
62819+ if (force_o_largefile())
62820+ event_f_flags |= O_LARGEFILE;
62821 group->fanotify_data.f_flags = event_f_flags;
62822 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
62823 oevent->response = 0;
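[annotation] Two independent fanotify fixes: the copy_to_user() is now capped at sizeof(fanotify_event_metadata), so a corrupted or oversized event_len can no longer over-read kernel stack into userspace, and fanotify_init() honours force_o_largefile() the way open() does. The bounded-copy pattern, sketched with an illustrative helper name:

static int copy_metadata_bounded(void __user *buf,
				 const struct fanotify_event_metadata *md)
{
	/* never let a length field in the object read past the object */
	if (md->event_len > sizeof(*md))
		return -EINVAL;
	return copy_to_user(buf, md, md->event_len) ? -EFAULT : 0;
}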
62824diff --git a/fs/notify/notification.c b/fs/notify/notification.c
62825index 1e58402..bb2d6f4 100644
62826--- a/fs/notify/notification.c
62827+++ b/fs/notify/notification.c
62828@@ -48,7 +48,7 @@
62829 #include <linux/fsnotify_backend.h>
62830 #include "fsnotify.h"
62831
62832-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62833+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62834
62835 /**
62836 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
62837@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
62838 */
62839 u32 fsnotify_get_cookie(void)
62840 {
62841- return atomic_inc_return(&fsnotify_sync_cookie);
62842+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
62843 }
62844 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
62845
62846diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
62847index 9e38daf..5727cae 100644
62848--- a/fs/ntfs/dir.c
62849+++ b/fs/ntfs/dir.c
62850@@ -1310,7 +1310,7 @@ find_next_index_buffer:
62851 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
62852 ~(s64)(ndir->itype.index.block_size - 1)));
62853 /* Bounds checks. */
62854- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
62855+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
62856 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
62857 "inode 0x%lx or driver bug.", vdir->i_ino);
62858 goto err_out;
62859diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
62860index db9bd8a..8338fb6 100644
62861--- a/fs/ntfs/file.c
62862+++ b/fs/ntfs/file.c
62863@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
62864 char *addr;
62865 size_t total = 0;
62866 unsigned len;
62867- int left;
62868+ unsigned left;
62869
62870 do {
62871 len = PAGE_CACHE_SIZE - ofs;
62872diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
62873index 82650d5..db37dcf 100644
62874--- a/fs/ntfs/super.c
62875+++ b/fs/ntfs/super.c
62876@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62877 if (!silent)
62878 ntfs_error(sb, "Primary boot sector is invalid.");
62879 } else if (!silent)
62880- ntfs_error(sb, read_err_str, "primary");
62881+ ntfs_error(sb, read_err_str, "%s", "primary");
62882 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
62883 if (bh_primary)
62884 brelse(bh_primary);
62885@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62886 goto hotfix_primary_boot_sector;
62887 brelse(bh_backup);
62888 } else if (!silent)
62889- ntfs_error(sb, read_err_str, "backup");
62890+ ntfs_error(sb, read_err_str, "%s", "backup");
62891 /* Try to read NT3.51- backup boot sector. */
62892 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
62893 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
62894@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62895 "sector.");
62896 brelse(bh_backup);
62897 } else if (!silent)
62898- ntfs_error(sb, read_err_str, "backup");
62899+ ntfs_error(sb, read_err_str, "%s", "backup");
62900 /* We failed. Cleanup and return. */
62901 if (bh_primary)
62902 brelse(bh_primary);
62903diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
62904index 0440134..d52c93a 100644
62905--- a/fs/ocfs2/localalloc.c
62906+++ b/fs/ocfs2/localalloc.c
62907@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
62908 goto bail;
62909 }
62910
62911- atomic_inc(&osb->alloc_stats.moves);
62912+ atomic_inc_unchecked(&osb->alloc_stats.moves);
62913
62914 bail:
62915 if (handle)
62916diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
62917index 553f53c..aaf5133 100644
62918--- a/fs/ocfs2/ocfs2.h
62919+++ b/fs/ocfs2/ocfs2.h
62920@@ -235,11 +235,11 @@ enum ocfs2_vol_state
62921
62922 struct ocfs2_alloc_stats
62923 {
62924- atomic_t moves;
62925- atomic_t local_data;
62926- atomic_t bitmap_data;
62927- atomic_t bg_allocs;
62928- atomic_t bg_extends;
62929+ atomic_unchecked_t moves;
62930+ atomic_unchecked_t local_data;
62931+ atomic_unchecked_t bitmap_data;
62932+ atomic_unchecked_t bg_allocs;
62933+ atomic_unchecked_t bg_extends;
62934 };
62935
62936 enum ocfs2_local_alloc_state
62937diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
62938index 47ae266..6e8b793 100644
62939--- a/fs/ocfs2/suballoc.c
62940+++ b/fs/ocfs2/suballoc.c
62941@@ -866,7 +866,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
62942 mlog_errno(status);
62943 goto bail;
62944 }
62945- atomic_inc(&osb->alloc_stats.bg_extends);
62946+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
62947
62948 /* You should never ask for this much metadata */
62949 BUG_ON(bits_wanted >
62950@@ -1992,7 +1992,7 @@ int ocfs2_claim_metadata(handle_t *handle,
62951 mlog_errno(status);
62952 goto bail;
62953 }
62954- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62955+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62956
62957 *suballoc_loc = res.sr_bg_blkno;
62958 *suballoc_bit_start = res.sr_bit_offset;
62959@@ -2156,7 +2156,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
62960 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
62961 res->sr_bits);
62962
62963- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62964+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62965
62966 BUG_ON(res->sr_bits != 1);
62967
62968@@ -2198,7 +2198,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
62969 mlog_errno(status);
62970 goto bail;
62971 }
62972- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62973+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62974
62975 BUG_ON(res.sr_bits != 1);
62976
62977@@ -2302,7 +2302,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62978 cluster_start,
62979 num_clusters);
62980 if (!status)
62981- atomic_inc(&osb->alloc_stats.local_data);
62982+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
62983 } else {
62984 if (min_clusters > (osb->bitmap_cpg - 1)) {
62985 /* The only paths asking for contiguousness
62986@@ -2328,7 +2328,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62987 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
62988 res.sr_bg_blkno,
62989 res.sr_bit_offset);
62990- atomic_inc(&osb->alloc_stats.bitmap_data);
62991+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
62992 *num_clusters = res.sr_bits;
62993 }
62994 }
62995diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
62996index 49d84f8..4807e0b 100644
62997--- a/fs/ocfs2/super.c
62998+++ b/fs/ocfs2/super.c
62999@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
63000 "%10s => GlobalAllocs: %d LocalAllocs: %d "
63001 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
63002 "Stats",
63003- atomic_read(&osb->alloc_stats.bitmap_data),
63004- atomic_read(&osb->alloc_stats.local_data),
63005- atomic_read(&osb->alloc_stats.bg_allocs),
63006- atomic_read(&osb->alloc_stats.moves),
63007- atomic_read(&osb->alloc_stats.bg_extends));
63008+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
63009+ atomic_read_unchecked(&osb->alloc_stats.local_data),
63010+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
63011+ atomic_read_unchecked(&osb->alloc_stats.moves),
63012+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
63013
63014 out += snprintf(buf + out, len - out,
63015 "%10s => State: %u Descriptor: %llu Size: %u bits "
63016@@ -2123,11 +2123,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
63017 spin_lock_init(&osb->osb_xattr_lock);
63018 ocfs2_init_steal_slots(osb);
63019
63020- atomic_set(&osb->alloc_stats.moves, 0);
63021- atomic_set(&osb->alloc_stats.local_data, 0);
63022- atomic_set(&osb->alloc_stats.bitmap_data, 0);
63023- atomic_set(&osb->alloc_stats.bg_allocs, 0);
63024- atomic_set(&osb->alloc_stats.bg_extends, 0);
63025+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
63026+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
63027+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
63028+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
63029+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
63030
63031 /* Copy the blockcheck stats from the superblock probe */
63032 osb->osb_ecc_stats = *stats;
63033diff --git a/fs/open.c b/fs/open.c
63034index 2ed7325..4e77ac3 100644
63035--- a/fs/open.c
63036+++ b/fs/open.c
63037@@ -32,6 +32,8 @@
63038 #include <linux/dnotify.h>
63039 #include <linux/compat.h>
63040
63041+#define CREATE_TRACE_POINTS
63042+#include <trace/events/fs.h>
63043 #include "internal.h"
63044
63045 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
63046@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
63047 error = locks_verify_truncate(inode, NULL, length);
63048 if (!error)
63049 error = security_path_truncate(path);
63050+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
63051+ error = -EACCES;
63052 if (!error)
63053 error = do_truncate(path->dentry, length, 0, NULL);
63054
63055@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
63056 error = locks_verify_truncate(inode, f.file, length);
63057 if (!error)
63058 error = security_path_truncate(&f.file->f_path);
63059+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
63060+ error = -EACCES;
63061 if (!error)
63062 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
63063 sb_end_write(inode->i_sb);
63064@@ -361,6 +367,9 @@ retry:
63065 if (__mnt_is_readonly(path.mnt))
63066 res = -EROFS;
63067
63068+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
63069+ res = -EACCES;
63070+
63071 out_path_release:
63072 path_put(&path);
63073 if (retry_estale(res, lookup_flags)) {
63074@@ -392,6 +401,8 @@ retry:
63075 if (error)
63076 goto dput_and_out;
63077
63078+ gr_log_chdir(path.dentry, path.mnt);
63079+
63080 set_fs_pwd(current->fs, &path);
63081
63082 dput_and_out:
63083@@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
63084 goto out_putf;
63085
63086 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
63087+
63088+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
63089+ error = -EPERM;
63090+
63091+ if (!error)
63092+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
63093+
63094 if (!error)
63095 set_fs_pwd(current->fs, &f.file->f_path);
63096 out_putf:
63097@@ -450,7 +468,13 @@ retry:
63098 if (error)
63099 goto dput_and_out;
63100
63101+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
63102+ goto dput_and_out;
63103+
63104 set_fs_root(current->fs, &path);
63105+
63106+ gr_handle_chroot_chdir(&path);
63107+
63108 error = 0;
63109 dput_and_out:
63110 path_put(&path);
63111@@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode)
63112 return error;
63113 retry_deleg:
63114 mutex_lock(&inode->i_mutex);
63115+
63116+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
63117+ error = -EACCES;
63118+ goto out_unlock;
63119+ }
63120+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
63121+ error = -EACCES;
63122+ goto out_unlock;
63123+ }
63124+
63125 error = security_path_chmod(path, mode);
63126 if (error)
63127 goto out_unlock;
63128@@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
63129 uid = make_kuid(current_user_ns(), user);
63130 gid = make_kgid(current_user_ns(), group);
63131
63132+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
63133+ return -EACCES;
63134+
63135 newattrs.ia_valid = ATTR_CTIME;
63136 if (user != (uid_t) -1) {
63137 if (!uid_valid(uid))
63138@@ -982,6 +1019,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
63139 } else {
63140 fsnotify_open(f);
63141 fd_install(fd, f);
63142+ trace_do_sys_open(tmp->name, flags, mode);
63143 }
63144 }
63145 putname(tmp);
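
[Editor's note] The fs/open.c hooks above all follow one calling convention: they run after the existing DAC and LSM checks, return nonzero to allow and zero to deny, and the call site maps a denial to -EACCES. A hypothetical kernel-context stub to make the convention concrete (gr_acl_handle_truncate() is real in this patch, but the body below is an illustration, not grsecurity's implementation):

__u32 gr_acl_handle_truncate(const struct dentry *dentry,
			     const struct vfsmount *mnt)
{
	/* consult the RBAC object table for (dentry, mnt); return 1 if
	 * the current subject may truncate the object, 0 if not */
	return 1;	/* sketch: RBAC disabled, always allow */
}

Because 0 means "deny", the callers read naturally as `if (!error && !gr_acl_handle_truncate(...)) error = -EACCES;`.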
63146diff --git a/fs/pipe.c b/fs/pipe.c
63147index 78fd0d0..f71fc09 100644
63148--- a/fs/pipe.c
63149+++ b/fs/pipe.c
63150@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
63151
63152 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
63153 {
63154- if (pipe->files)
63155+ if (atomic_read(&pipe->files))
63156 mutex_lock_nested(&pipe->mutex, subclass);
63157 }
63158
63159@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
63160
63161 void pipe_unlock(struct pipe_inode_info *pipe)
63162 {
63163- if (pipe->files)
63164+ if (atomic_read(&pipe->files))
63165 mutex_unlock(&pipe->mutex);
63166 }
63167 EXPORT_SYMBOL(pipe_unlock);
63168@@ -449,9 +449,9 @@ redo:
63169 }
63170 if (bufs) /* More to do? */
63171 continue;
63172- if (!pipe->writers)
63173+ if (!atomic_read(&pipe->writers))
63174 break;
63175- if (!pipe->waiting_writers) {
63176+ if (!atomic_read(&pipe->waiting_writers)) {
63177 /* syscall merging: Usually we must not sleep
63178 * if O_NONBLOCK is set, or if we got some data.
63179 * But if a writer sleeps in kernel space, then
63180@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
63181 ret = 0;
63182 __pipe_lock(pipe);
63183
63184- if (!pipe->readers) {
63185+ if (!atomic_read(&pipe->readers)) {
63186 send_sig(SIGPIPE, current, 0);
63187 ret = -EPIPE;
63188 goto out;
63189@@ -562,7 +562,7 @@ redo1:
63190 for (;;) {
63191 int bufs;
63192
63193- if (!pipe->readers) {
63194+ if (!atomic_read(&pipe->readers)) {
63195 send_sig(SIGPIPE, current, 0);
63196 if (!ret)
63197 ret = -EPIPE;
63198@@ -653,9 +653,9 @@ redo2:
63199 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
63200 do_wakeup = 0;
63201 }
63202- pipe->waiting_writers++;
63203+ atomic_inc(&pipe->waiting_writers);
63204 pipe_wait(pipe);
63205- pipe->waiting_writers--;
63206+ atomic_dec(&pipe->waiting_writers);
63207 }
63208 out:
63209 __pipe_unlock(pipe);
63210@@ -710,7 +710,7 @@ pipe_poll(struct file *filp, poll_table *wait)
63211 mask = 0;
63212 if (filp->f_mode & FMODE_READ) {
63213 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
63214- if (!pipe->writers && filp->f_version != pipe->w_counter)
63215+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
63216 mask |= POLLHUP;
63217 }
63218
63219@@ -720,7 +720,7 @@ pipe_poll(struct file *filp, poll_table *wait)
63220 * Most Unices do not set POLLERR for FIFOs but on Linux they
63221 * behave exactly like pipes for poll().
63222 */
63223- if (!pipe->readers)
63224+ if (!atomic_read(&pipe->readers))
63225 mask |= POLLERR;
63226 }
63227
63228@@ -732,7 +732,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
63229 int kill = 0;
63230
63231 spin_lock(&inode->i_lock);
63232- if (!--pipe->files) {
63233+ if (atomic_dec_and_test(&pipe->files)) {
63234 inode->i_pipe = NULL;
63235 kill = 1;
63236 }
63237@@ -749,11 +749,11 @@ pipe_release(struct inode *inode, struct file *file)
63238
63239 __pipe_lock(pipe);
63240 if (file->f_mode & FMODE_READ)
63241- pipe->readers--;
63242+ atomic_dec(&pipe->readers);
63243 if (file->f_mode & FMODE_WRITE)
63244- pipe->writers--;
63245+ atomic_dec(&pipe->writers);
63246
63247- if (pipe->readers || pipe->writers) {
63248+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
63249 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
63250 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
63251 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
63252@@ -818,7 +818,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
63253 kfree(pipe);
63254 }
63255
63256-static struct vfsmount *pipe_mnt __read_mostly;
63257+struct vfsmount *pipe_mnt __read_mostly;
63258
63259 /*
63260 * pipefs_dname() is called from d_path().
63261@@ -848,8 +848,9 @@ static struct inode * get_pipe_inode(void)
63262 goto fail_iput;
63263
63264 inode->i_pipe = pipe;
63265- pipe->files = 2;
63266- pipe->readers = pipe->writers = 1;
63267+ atomic_set(&pipe->files, 2);
63268+ atomic_set(&pipe->readers, 1);
63269+ atomic_set(&pipe->writers, 1);
63270 inode->i_fop = &pipefifo_fops;
63271
63272 /*
63273@@ -1028,17 +1029,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
63274 spin_lock(&inode->i_lock);
63275 if (inode->i_pipe) {
63276 pipe = inode->i_pipe;
63277- pipe->files++;
63278+ atomic_inc(&pipe->files);
63279 spin_unlock(&inode->i_lock);
63280 } else {
63281 spin_unlock(&inode->i_lock);
63282 pipe = alloc_pipe_info();
63283 if (!pipe)
63284 return -ENOMEM;
63285- pipe->files = 1;
63286+ atomic_set(&pipe->files, 1);
63287 spin_lock(&inode->i_lock);
63288 if (unlikely(inode->i_pipe)) {
63289- inode->i_pipe->files++;
63290+ atomic_inc(&inode->i_pipe->files);
63291 spin_unlock(&inode->i_lock);
63292 free_pipe_info(pipe);
63293 pipe = inode->i_pipe;
63294@@ -1063,10 +1064,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
63295 * opened, even when there is no process writing the FIFO.
63296 */
63297 pipe->r_counter++;
63298- if (pipe->readers++ == 0)
63299+ if (atomic_inc_return(&pipe->readers) == 1)
63300 wake_up_partner(pipe);
63301
63302- if (!is_pipe && !pipe->writers) {
63303+ if (!is_pipe && !atomic_read(&pipe->writers)) {
63304 if ((filp->f_flags & O_NONBLOCK)) {
63305 /* suppress POLLHUP until we have
63306 * seen a writer */
63307@@ -1085,14 +1086,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
63308 * errno=ENXIO when there is no process reading the FIFO.
63309 */
63310 ret = -ENXIO;
63311- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
63312+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
63313 goto err;
63314
63315 pipe->w_counter++;
63316- if (!pipe->writers++)
63317+ if (atomic_inc_return(&pipe->writers) == 1)
63318 wake_up_partner(pipe);
63319
63320- if (!is_pipe && !pipe->readers) {
63321+ if (!is_pipe && !atomic_read(&pipe->readers)) {
63322 if (wait_for_partner(pipe, &pipe->r_counter))
63323 goto err_wr;
63324 }
63325@@ -1106,11 +1107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
63326 * the process can at least talk to itself.
63327 */
63328
63329- pipe->readers++;
63330- pipe->writers++;
63331+ atomic_inc(&pipe->readers);
63332+ atomic_inc(&pipe->writers);
63333 pipe->r_counter++;
63334 pipe->w_counter++;
63335- if (pipe->readers == 1 || pipe->writers == 1)
63336+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
63337 wake_up_partner(pipe);
63338 break;
63339
63340@@ -1124,13 +1125,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
63341 return 0;
63342
63343 err_rd:
63344- if (!--pipe->readers)
63345+ if (atomic_dec_and_test(&pipe->readers))
63346 wake_up_interruptible(&pipe->wait);
63347 ret = -ERESTARTSYS;
63348 goto err;
63349
63350 err_wr:
63351- if (!--pipe->writers)
63352+ if (atomic_dec_and_test(&pipe->writers))
63353 wake_up_interruptible(&pipe->wait);
63354 ret = -ERESTARTSYS;
63355 goto err;
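
[Editor's note] The pipe.c conversion turns the readers/writers/files bookkeeping into atomics so these counters fall under the same hardening as other reference counts. A self-contained userspace analogue of the release path, using C11 atomics in place of the kernel API (illustration only):

#include <stdatomic.h>
#include <stdbool.h>

struct pipe_like {
	atomic_int files;	/* stands in for pipe->files above */
};

/* mirrors atomic_dec_and_test(): true only for the thread that drops
 * the final reference, so exactly one caller frees the object */
static bool put_pipe_like(struct pipe_like *p)
{
	return atomic_fetch_sub(&p->files, 1) == 1;
}

int main(void)
{
	struct pipe_like p = { .files = 2 };

	atomic_fetch_sub(&p.files, 1);		/* first put: object survives */
	return put_pipe_like(&p) ? 0 : 1;	/* second put: last reference */
}

Note how `if (!--pipe->files)` becomes `if (atomic_dec_and_test(&pipe->files))` and `if (pipe->readers++ == 0)` becomes `if (atomic_inc_return(&pipe->readers) == 1)`: the post-increment tested the old value, the atomic helper returns the new one, so the comparison constant shifts by one.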
63356diff --git a/fs/posix_acl.c b/fs/posix_acl.c
63357index 0855f77..6787d50 100644
63358--- a/fs/posix_acl.c
63359+++ b/fs/posix_acl.c
63360@@ -20,6 +20,7 @@
63361 #include <linux/xattr.h>
63362 #include <linux/export.h>
63363 #include <linux/user_namespace.h>
63364+#include <linux/grsecurity.h>
63365
63366 struct posix_acl **acl_by_type(struct inode *inode, int type)
63367 {
63368@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
63369 }
63370 }
63371 if (mode_p)
63372- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
63373+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
63374 return not_equiv;
63375 }
63376 EXPORT_SYMBOL(posix_acl_equiv_mode);
63377@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
63378 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
63379 }
63380
63381- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
63382+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
63383 return not_equiv;
63384 }
63385
63386@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
63387 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
63388 int err = -ENOMEM;
63389 if (clone) {
63390+ *mode_p &= ~gr_acl_umask();
63391+
63392 err = posix_acl_create_masq(clone, mode_p);
63393 if (err < 0) {
63394 posix_acl_release(clone);
63395@@ -659,11 +662,12 @@ struct posix_acl *
63396 posix_acl_from_xattr(struct user_namespace *user_ns,
63397 const void *value, size_t size)
63398 {
63399- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
63400- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
63401+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
63402+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
63403 int count;
63404 struct posix_acl *acl;
63405 struct posix_acl_entry *acl_e;
63406+ umode_t umask = gr_acl_umask();
63407
63408 if (!value)
63409 return NULL;
63410@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
63411
63412 switch(acl_e->e_tag) {
63413 case ACL_USER_OBJ:
63414+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
63415+ break;
63416 case ACL_GROUP_OBJ:
63417 case ACL_MASK:
63418+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
63419+ break;
63420 case ACL_OTHER:
63421+ acl_e->e_perm &= ~(umask & S_IRWXO);
63422 break;
63423
63424 case ACL_USER:
63425+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
63426 acl_e->e_uid =
63427 make_kuid(user_ns,
63428 le32_to_cpu(entry->e_id));
63429@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
63430 goto fail;
63431 break;
63432 case ACL_GROUP:
63433+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
63434 acl_e->e_gid =
63435 make_kgid(user_ns,
63436 le32_to_cpu(entry->e_id));
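
[Editor's note] The shifts in the posix_acl.c hunks line up each permission class of the enforced umask with the 3-bit e_perm field of the corresponding ACL entry: owner bits come down by 6, group bits by 3, other bits apply as-is. A small worked example (standalone userspace C):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned short e_perm = 07;			/* rwx */
	mode_t umask_bits = 022;			/* deny group/other write */

	e_perm &= ~((umask_bits & S_IRWXG) >> 3);	/* ACL_GROUP entry */
	printf("group e_perm under umask 022: %o\n", e_perm);	/* 5, i.e. r-x */
	return 0;
}

So with gr_acl_umask() returning 022, ACL_GROUP and ACL_OTHER entries lose their write bit while owner entries are untouched.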
63437diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
63438index 2183fcf..3c32a98 100644
63439--- a/fs/proc/Kconfig
63440+++ b/fs/proc/Kconfig
63441@@ -30,7 +30,7 @@ config PROC_FS
63442
63443 config PROC_KCORE
63444 bool "/proc/kcore support" if !ARM
63445- depends on PROC_FS && MMU
63446+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
63447 help
63448 Provides a virtual ELF core file of the live kernel. This can
63449 be read with gdb and other ELF tools. No modifications can be
63450@@ -38,8 +38,8 @@ config PROC_KCORE
63451
63452 config PROC_VMCORE
63453 bool "/proc/vmcore support"
63454- depends on PROC_FS && CRASH_DUMP
63455- default y
63456+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
63457+ default n
63458 help
63459 Exports the dump image of crashed kernel in ELF format.
63460
63461@@ -63,8 +63,8 @@ config PROC_SYSCTL
63462 limited in memory.
63463
63464 config PROC_PAGE_MONITOR
63465- default y
63466- depends on PROC_FS && MMU
63467+ default n
63468+ depends on PROC_FS && MMU && !GRKERNSEC
63469 bool "Enable /proc page monitoring" if EXPERT
63470 help
63471 Various /proc files exist to monitor process memory utilization:
63472diff --git a/fs/proc/array.c b/fs/proc/array.c
63473index 656e401..b5b86b9 100644
63474--- a/fs/proc/array.c
63475+++ b/fs/proc/array.c
63476@@ -60,6 +60,7 @@
63477 #include <linux/tty.h>
63478 #include <linux/string.h>
63479 #include <linux/mman.h>
63480+#include <linux/grsecurity.h>
63481 #include <linux/proc_fs.h>
63482 #include <linux/ioport.h>
63483 #include <linux/uaccess.h>
63484@@ -356,6 +357,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
63485 seq_putc(m, '\n');
63486 }
63487
63488+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63489+static inline void task_pax(struct seq_file *m, struct task_struct *p)
63490+{
63491+ if (p->mm)
63492+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
63493+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
63494+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
63495+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
63496+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
63497+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
63498+ else
63499+ seq_printf(m, "PaX:\t-----\n");
63500+}
63501+#endif
63502+
63503 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
63504 struct pid *pid, struct task_struct *task)
63505 {
63506@@ -374,9 +390,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
63507 task_cpus_allowed(m, task);
63508 cpuset_task_status_allowed(m, task);
63509 task_context_switch_counts(m, task);
63510+
63511+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63512+ task_pax(m, task);
63513+#endif
63514+
63515+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
63516+ task_grsec_rbac(m, task);
63517+#endif
63518+
63519 return 0;
63520 }
63521
63522+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63523+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63524+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
63525+ _mm->pax_flags & MF_PAX_SEGMEXEC))
63526+#endif
63527+
63528 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
63529 struct pid *pid, struct task_struct *task, int whole)
63530 {
63531@@ -398,6 +429,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
63532 char tcomm[sizeof(task->comm)];
63533 unsigned long flags;
63534
63535+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63536+ if (current->exec_id != m->exec_id) {
63537+ gr_log_badprocpid("stat");
63538+ return 0;
63539+ }
63540+#endif
63541+
63542 state = *get_task_state(task);
63543 vsize = eip = esp = 0;
63544 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
63545@@ -468,6 +506,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
63546 gtime = task_gtime(task);
63547 }
63548
63549+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63550+ if (PAX_RAND_FLAGS(mm)) {
63551+ eip = 0;
63552+ esp = 0;
63553+ wchan = 0;
63554+ }
63555+#endif
63556+#ifdef CONFIG_GRKERNSEC_HIDESYM
63557+ wchan = 0;
63558+ eip = 0;
63559+ esp = 0;
63560+#endif
63561+
63562 /* scale priority and nice values from timeslices to -20..20 */
63563 /* to make it look like a "normal" Unix priority/nice value */
63564 priority = task_prio(task);
63565@@ -504,9 +555,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
63566 seq_put_decimal_ull(m, ' ', vsize);
63567 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
63568 seq_put_decimal_ull(m, ' ', rsslim);
63569+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63570+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
63571+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
63572+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
63573+#else
63574 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
63575 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
63576 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
63577+#endif
63578 seq_put_decimal_ull(m, ' ', esp);
63579 seq_put_decimal_ull(m, ' ', eip);
63580 /* The signal information here is obsolete.
63581@@ -528,7 +585,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
63582 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
63583 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
63584
63585- if (mm && permitted) {
63586+ if (mm && permitted
63587+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63588+ && !PAX_RAND_FLAGS(mm)
63589+#endif
63590+ ) {
63591 seq_put_decimal_ull(m, ' ', mm->start_data);
63592 seq_put_decimal_ull(m, ' ', mm->end_data);
63593 seq_put_decimal_ull(m, ' ', mm->start_brk);
63594@@ -566,8 +627,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
63595 struct pid *pid, struct task_struct *task)
63596 {
63597 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
63598- struct mm_struct *mm = get_task_mm(task);
63599+ struct mm_struct *mm;
63600
63601+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63602+ if (current->exec_id != m->exec_id) {
63603+ gr_log_badprocpid("statm");
63604+ return 0;
63605+ }
63606+#endif
63607+ mm = get_task_mm(task);
63608 if (mm) {
63609 size = task_statm(mm, &shared, &text, &data, &resident);
63610 mmput(mm);
63611@@ -590,6 +658,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
63612 return 0;
63613 }
63614
63615+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63616+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
63617+{
63618+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
63619+}
63620+#endif
63621+
63622 #ifdef CONFIG_CHECKPOINT_RESTORE
63623 static struct pid *
63624 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
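
[Editor's note] The task_pax() helper added above prints one letter per PaX feature into /proc/<pid>/status, uppercase when the flag is set. A standalone illustration of the format (the flag values here are placeholders, not the kernel's MF_PAX_* constants):

#include <stdio.h>

enum { PAGEEXEC = 1, EMUTRAMP = 2, MPROTECT = 4, RANDMMAP = 8, SEGMEXEC = 16 };

static void print_pax(unsigned int pax_flags)
{
	printf("PaX:\t%c%c%c%c%c\n",
	       pax_flags & PAGEEXEC ? 'P' : 'p',
	       pax_flags & EMUTRAMP ? 'E' : 'e',
	       pax_flags & MPROTECT ? 'M' : 'm',
	       pax_flags & RANDMMAP ? 'R' : 'r',
	       pax_flags & SEGMEXEC ? 'S' : 's');
}

int main(void)
{
	print_pax(PAGEEXEC | MPROTECT | RANDMMAP);	/* prints "PaX:  PeMRs" */
	return 0;
}

A task with no mm (a kernel thread) shows "PaX: -----" instead.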
63625diff --git a/fs/proc/base.c b/fs/proc/base.c
63626index b976062..584d0bc 100644
63627--- a/fs/proc/base.c
63628+++ b/fs/proc/base.c
63629@@ -113,6 +113,14 @@ struct pid_entry {
63630 union proc_op op;
63631 };
63632
63633+struct getdents_callback {
63634+ struct linux_dirent __user * current_dir;
63635+ struct linux_dirent __user * previous;
63636+ struct file * file;
63637+ int count;
63638+ int error;
63639+};
63640+
63641 #define NOD(NAME, MODE, IOP, FOP, OP) { \
63642 .name = (NAME), \
63643 .len = sizeof(NAME) - 1, \
63644@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
63645 if (!mm->arg_end)
63646 goto out_mm; /* Shh! No looking before we're done */
63647
63648+ if (gr_acl_handle_procpidmem(task))
63649+ goto out_mm;
63650+
63651 len = mm->arg_end - mm->arg_start;
63652
63653 if (len > PAGE_SIZE)
63654@@ -237,12 +248,28 @@ out:
63655 return res;
63656 }
63657
63658+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63659+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63660+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
63661+ _mm->pax_flags & MF_PAX_SEGMEXEC))
63662+#endif
63663+
63664 static int proc_pid_auxv(struct task_struct *task, char *buffer)
63665 {
63666 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
63667 int res = PTR_ERR(mm);
63668 if (mm && !IS_ERR(mm)) {
63669 unsigned int nwords = 0;
63670+
63671+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63672+ /* allow if we're currently ptracing this task */
63673+ if (PAX_RAND_FLAGS(mm) &&
63674+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
63675+ mmput(mm);
63676+ return 0;
63677+ }
63678+#endif
63679+
63680 do {
63681 nwords += 2;
63682 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
63683@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
63684 }
63685
63686
63687-#ifdef CONFIG_KALLSYMS
63688+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63689 /*
63690 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
63691 * Returns the resolved symbol. If that fails, simply return the address.
63692@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
63693 mutex_unlock(&task->signal->cred_guard_mutex);
63694 }
63695
63696-#ifdef CONFIG_STACKTRACE
63697+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63698
63699 #define MAX_STACK_TRACE_DEPTH 64
63700
63701@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
63702 return count;
63703 }
63704
63705-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63706+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63707 static int proc_pid_syscall(struct task_struct *task, char *buffer)
63708 {
63709 long nr;
63710@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
63711 /************************************************************************/
63712
63713 /* permission checks */
63714-static int proc_fd_access_allowed(struct inode *inode)
63715+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
63716 {
63717 struct task_struct *task;
63718 int allowed = 0;
63719@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
63720 */
63721 task = get_proc_task(inode);
63722 if (task) {
63723- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
63724+ if (log)
63725+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
63726+ else
63727+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
63728 put_task_struct(task);
63729 }
63730 return allowed;
63731@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
63732 struct task_struct *task,
63733 int hide_pid_min)
63734 {
63735+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63736+ return false;
63737+
63738+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63739+ rcu_read_lock();
63740+ {
63741+ const struct cred *tmpcred = current_cred();
63742+ const struct cred *cred = __task_cred(task);
63743+
63744+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
63745+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63746+ || in_group_p(grsec_proc_gid)
63747+#endif
63748+ ) {
63749+ rcu_read_unlock();
63750+ return true;
63751+ }
63752+ }
63753+ rcu_read_unlock();
63754+
63755+ if (!pid->hide_pid)
63756+ return false;
63757+#endif
63758+
63759 if (pid->hide_pid < hide_pid_min)
63760 return true;
63761 if (in_group_p(pid->pid_gid))
63762 return true;
63763+
63764 return ptrace_may_access(task, PTRACE_MODE_READ);
63765 }
63766
63767@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
63768 put_task_struct(task);
63769
63770 if (!has_perms) {
63771+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63772+ {
63773+#else
63774 if (pid->hide_pid == 2) {
63775+#endif
63776 /*
63777 * Let's make getdents(), stat(), and open()
63778 * consistent with each other. If a process
63779@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
63780 if (!task)
63781 return -ESRCH;
63782
63783+ if (gr_acl_handle_procpidmem(task)) {
63784+ put_task_struct(task);
63785+ return -EPERM;
63786+ }
63787+
63788 mm = mm_access(task, mode);
63789 put_task_struct(task);
63790
63791@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
63792
63793 file->private_data = mm;
63794
63795+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63796+ file->f_version = current->exec_id;
63797+#endif
63798+
63799 return 0;
63800 }
63801
63802@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
63803 ssize_t copied;
63804 char *page;
63805
63806+#ifdef CONFIG_GRKERNSEC
63807+ if (write)
63808+ return -EPERM;
63809+#endif
63810+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63811+ if (file->f_version != current->exec_id) {
63812+ gr_log_badprocpid("mem");
63813+ return 0;
63814+ }
63815+#endif
63816+
63817 if (!mm)
63818 return 0;
63819
63820@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
63821 goto free;
63822
63823 while (count > 0) {
63824- int this_len = min_t(int, count, PAGE_SIZE);
63825+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
63826
63827 if (write && copy_from_user(page, buf, this_len)) {
63828 copied = -EFAULT;
63829@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
63830 if (!mm)
63831 return 0;
63832
63833+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63834+ if (file->f_version != current->exec_id) {
63835+ gr_log_badprocpid("environ");
63836+ return 0;
63837+ }
63838+#endif
63839+
63840 page = (char *)__get_free_page(GFP_TEMPORARY);
63841 if (!page)
63842 return -ENOMEM;
63843@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
63844 goto free;
63845 while (count > 0) {
63846 size_t this_len, max_len;
63847- int retval;
63848+ ssize_t retval;
63849
63850 if (src >= (mm->env_end - mm->env_start))
63851 break;
63852@@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
63853 int error = -EACCES;
63854
63855 /* Are we allowed to snoop on the tasks file descriptors? */
63856- if (!proc_fd_access_allowed(inode))
63857+ if (!proc_fd_access_allowed(inode, 0))
63858 goto out;
63859
63860 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
63861@@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
63862 struct path path;
63863
63864 /* Are we allowed to snoop on the tasks file descriptors? */
63865- if (!proc_fd_access_allowed(inode))
63866- goto out;
63867+ /* Logging this is needed for RBAC learning on Chromium to work properly,
63868+ but we don't want to flood the logs from 'ps', which does a readlink
63869+ on /proc/<pid>/fd/2 of every task in the listing, nor do we want 'ps' to
63870+ learn CAP_SYS_PTRACE, as it isn't necessary for its basic functionality
63871+ */
63872+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
63873+ if (!proc_fd_access_allowed(inode, 0))
63874+ goto out;
63875+ } else {
63876+ if (!proc_fd_access_allowed(inode, 1))
63877+ goto out;
63878+ }
63879
63880 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
63881 if (error)
63882@@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
63883 rcu_read_lock();
63884 cred = __task_cred(task);
63885 inode->i_uid = cred->euid;
63886+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63887+ inode->i_gid = grsec_proc_gid;
63888+#else
63889 inode->i_gid = cred->egid;
63890+#endif
63891 rcu_read_unlock();
63892 }
63893 security_task_to_inode(task, inode);
63894@@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
63895 return -ENOENT;
63896 }
63897 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
63898+#ifdef CONFIG_GRKERNSEC_PROC_USER
63899+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
63900+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63901+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
63902+#endif
63903 task_dumpable(task)) {
63904 cred = __task_cred(task);
63905 stat->uid = cred->euid;
63906+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63907+ stat->gid = grsec_proc_gid;
63908+#else
63909 stat->gid = cred->egid;
63910+#endif
63911 }
63912 }
63913 rcu_read_unlock();
63914@@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
63915
63916 if (task) {
63917 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
63918+#ifdef CONFIG_GRKERNSEC_PROC_USER
63919+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
63920+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63921+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
63922+#endif
63923 task_dumpable(task)) {
63924 rcu_read_lock();
63925 cred = __task_cred(task);
63926 inode->i_uid = cred->euid;
63927+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63928+ inode->i_gid = grsec_proc_gid;
63929+#else
63930 inode->i_gid = cred->egid;
63931+#endif
63932 rcu_read_unlock();
63933 } else {
63934 inode->i_uid = GLOBAL_ROOT_UID;
63935@@ -2178,6 +2296,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
63936 if (!task)
63937 goto out_no_task;
63938
63939+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63940+ goto out;
63941+
63942 /*
63943 * Yes, it does not scale. And it should not. Don't add
63944 * new entries into /proc/<tgid>/ without very good reasons.
63945@@ -2208,6 +2329,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
63946 if (!task)
63947 return -ENOENT;
63948
63949+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63950+ goto out;
63951+
63952 if (!dir_emit_dots(file, ctx))
63953 goto out;
63954
63955@@ -2597,7 +2721,7 @@ static const struct pid_entry tgid_base_stuff[] = {
63956 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
63957 #endif
63958 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
63959-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63960+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63961 INF("syscall", S_IRUGO, proc_pid_syscall),
63962 #endif
63963 INF("cmdline", S_IRUGO, proc_pid_cmdline),
63964@@ -2622,10 +2746,10 @@ static const struct pid_entry tgid_base_stuff[] = {
63965 #ifdef CONFIG_SECURITY
63966 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
63967 #endif
63968-#ifdef CONFIG_KALLSYMS
63969+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63970 INF("wchan", S_IRUGO, proc_pid_wchan),
63971 #endif
63972-#ifdef CONFIG_STACKTRACE
63973+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63974 ONE("stack", S_IRUGO, proc_pid_stack),
63975 #endif
63976 #ifdef CONFIG_SCHEDSTATS
63977@@ -2659,6 +2783,9 @@ static const struct pid_entry tgid_base_stuff[] = {
63978 #ifdef CONFIG_HARDWALL
63979 INF("hardwall", S_IRUGO, proc_pid_hardwall),
63980 #endif
63981+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63982+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
63983+#endif
63984 #ifdef CONFIG_USER_NS
63985 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
63986 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
63987@@ -2789,7 +2916,14 @@ static int proc_pid_instantiate(struct inode *dir,
63988 if (!inode)
63989 goto out;
63990
63991+#ifdef CONFIG_GRKERNSEC_PROC_USER
63992+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
63993+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63994+ inode->i_gid = grsec_proc_gid;
63995+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
63996+#else
63997 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
63998+#endif
63999 inode->i_op = &proc_tgid_base_inode_operations;
64000 inode->i_fop = &proc_tgid_base_operations;
64001 inode->i_flags|=S_IMMUTABLE;
64002@@ -2827,7 +2961,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
64003 if (!task)
64004 goto out;
64005
64006+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
64007+ goto out_put_task;
64008+
64009 result = proc_pid_instantiate(dir, dentry, task, NULL);
64010+out_put_task:
64011 put_task_struct(task);
64012 out:
64013 return ERR_PTR(result);
64014@@ -2933,7 +3071,7 @@ static const struct pid_entry tid_base_stuff[] = {
64015 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
64016 #endif
64017 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
64018-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
64019+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
64020 INF("syscall", S_IRUGO, proc_pid_syscall),
64021 #endif
64022 INF("cmdline", S_IRUGO, proc_pid_cmdline),
64023@@ -2960,10 +3098,10 @@ static const struct pid_entry tid_base_stuff[] = {
64024 #ifdef CONFIG_SECURITY
64025 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
64026 #endif
64027-#ifdef CONFIG_KALLSYMS
64028+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64029 INF("wchan", S_IRUGO, proc_pid_wchan),
64030 #endif
64031-#ifdef CONFIG_STACKTRACE
64032+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64033 ONE("stack", S_IRUGO, proc_pid_stack),
64034 #endif
64035 #ifdef CONFIG_SCHEDSTATS
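
[Editor's note] A pattern worth calling out in the base.c hunks: under GRKERNSEC_PROC_MEMMAP, files like mem and environ stamp the opener's exec_id into file->f_version at open time and refuse reads once current->exec_id no longer matches, closing the "open /proc/self/<file>, exec a privileged binary, then read" hole. Shape of the check, abstracted (exec_id is a grsecurity-added task field, not present in vanilla 3.14):

/* at open time */
file->f_version = current->exec_id;

/* at read time */
if (file->f_version != current->exec_id) {
	gr_log_badprocpid("mem");	/* audit the stale descriptor */
	return 0;			/* behave like EOF rather than leak */
}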
64036diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
64037index cbd82df..c0407d2 100644
64038--- a/fs/proc/cmdline.c
64039+++ b/fs/proc/cmdline.c
64040@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
64041
64042 static int __init proc_cmdline_init(void)
64043 {
64044+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64045+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
64046+#else
64047 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
64048+#endif
64049 return 0;
64050 }
64051 fs_initcall(proc_cmdline_init);
64052diff --git a/fs/proc/devices.c b/fs/proc/devices.c
64053index 50493ed..248166b 100644
64054--- a/fs/proc/devices.c
64055+++ b/fs/proc/devices.c
64056@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
64057
64058 static int __init proc_devices_init(void)
64059 {
64060+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64061+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
64062+#else
64063 proc_create("devices", 0, NULL, &proc_devinfo_operations);
64064+#endif
64065 return 0;
64066 }
64067 fs_initcall(proc_devices_init);
64068diff --git a/fs/proc/fd.c b/fs/proc/fd.c
64069index 985ea88..d118a0a 100644
64070--- a/fs/proc/fd.c
64071+++ b/fs/proc/fd.c
64072@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
64073 if (!task)
64074 return -ENOENT;
64075
64076- files = get_files_struct(task);
64077+ if (!gr_acl_handle_procpidmem(task))
64078+ files = get_files_struct(task);
64079 put_task_struct(task);
64080
64081 if (files) {
64082@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
64083 */
64084 int proc_fd_permission(struct inode *inode, int mask)
64085 {
64086+ struct task_struct *task;
64087 int rv = generic_permission(inode, mask);
64088- if (rv == 0)
64089- return 0;
64090+
64091 if (task_tgid(current) == proc_pid(inode))
64092 rv = 0;
64093+
64094+ task = get_proc_task(inode);
64095+ if (task == NULL)
64096+ return rv;
64097+
64098+ if (gr_acl_handle_procpidmem(task))
64099+ rv = -EACCES;
64100+
64101+ put_task_struct(task);
64102+
64103 return rv;
64104 }
64105
64106diff --git a/fs/proc/generic.c b/fs/proc/generic.c
64107index b7f268e..3bea6b7 100644
64108--- a/fs/proc/generic.c
64109+++ b/fs/proc/generic.c
64110@@ -23,6 +23,7 @@
64111 #include <linux/bitops.h>
64112 #include <linux/spinlock.h>
64113 #include <linux/completion.h>
64114+#include <linux/grsecurity.h>
64115 #include <asm/uaccess.h>
64116
64117 #include "internal.h"
64118@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
64119 return proc_lookup_de(PDE(dir), dir, dentry);
64120 }
64121
64122+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
64123+ unsigned int flags)
64124+{
64125+ if (gr_proc_is_restricted())
64126+ return ERR_PTR(-EACCES);
64127+
64128+ return proc_lookup_de(PDE(dir), dir, dentry);
64129+}
64130+
64131 /*
64132 * This returns non-zero if at EOF, so that the /proc
64133 * root directory can use this and check if it should
64134@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
64135 return proc_readdir_de(PDE(inode), file, ctx);
64136 }
64137
64138+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
64139+{
64140+ struct inode *inode = file_inode(file);
64141+
64142+ if (gr_proc_is_restricted())
64143+ return -EACCES;
64144+
64145+ return proc_readdir_de(PDE(inode), file, ctx);
64146+}
64147+
64148 /*
64149 * These are the generic /proc directory operations. They
64150 * use the in-memory "struct proc_dir_entry" tree to parse
64151@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
64152 .iterate = proc_readdir,
64153 };
64154
64155+static const struct file_operations proc_dir_restricted_operations = {
64156+ .llseek = generic_file_llseek,
64157+ .read = generic_read_dir,
64158+ .iterate = proc_readdir_restrict,
64159+};
64160+
64161 /*
64162 * proc directories can do almost nothing..
64163 */
64164@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
64165 .setattr = proc_notify_change,
64166 };
64167
64168+static const struct inode_operations proc_dir_restricted_inode_operations = {
64169+ .lookup = proc_lookup_restrict,
64170+ .getattr = proc_getattr,
64171+ .setattr = proc_notify_change,
64172+};
64173+
64174 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
64175 {
64176 struct proc_dir_entry *tmp;
64177@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
64178 return ret;
64179
64180 if (S_ISDIR(dp->mode)) {
64181- dp->proc_fops = &proc_dir_operations;
64182- dp->proc_iops = &proc_dir_inode_operations;
64183+ if (dp->restricted) {
64184+ dp->proc_fops = &proc_dir_restricted_operations;
64185+ dp->proc_iops = &proc_dir_restricted_inode_operations;
64186+ } else {
64187+ dp->proc_fops = &proc_dir_operations;
64188+ dp->proc_iops = &proc_dir_inode_operations;
64189+ }
64190 dir->nlink++;
64191 } else if (S_ISLNK(dp->mode)) {
64192 dp->proc_iops = &proc_link_inode_operations;
64193@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
64194 }
64195 EXPORT_SYMBOL_GPL(proc_mkdir_data);
64196
64197+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
64198+ struct proc_dir_entry *parent, void *data)
64199+{
64200+ struct proc_dir_entry *ent;
64201+
64202+ if (mode == 0)
64203+ mode = S_IRUGO | S_IXUGO;
64204+
64205+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
64206+ if (ent) {
64207+ ent->data = data;
64208+ ent->restricted = 1;
64209+ if (proc_register(parent, ent) < 0) {
64210+ kfree(ent);
64211+ ent = NULL;
64212+ }
64213+ }
64214+ return ent;
64215+}
64216+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
64217+
64218 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
64219 struct proc_dir_entry *parent)
64220 {
64221@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
64222 }
64223 EXPORT_SYMBOL(proc_mkdir);
64224
64225+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
64226+ struct proc_dir_entry *parent)
64227+{
64228+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
64229+}
64230+EXPORT_SYMBOL(proc_mkdir_restrict);
64231+
64232 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
64233 struct proc_dir_entry *parent,
64234 const struct file_operations *proc_fops,
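
[Editor's note] The restricted-directory machinery above threads a single `restricted` bit from directory creation down to lookup and readdir. A hypothetical caller (the directory name is invented for illustration) opts in simply by using the _restrict constructor:

/* a net-related /proc directory that GRKERNSEC_PROC should hide */
struct proc_dir_entry *d = proc_mkdir_restrict("example_net", parent);
if (!d)
	pr_warn("could not create /proc/example_net\n");

Everything else is automatic: proc_register() swaps in proc_dir_restricted_operations, whose lookup/iterate wrappers return -EACCES for callers that gr_proc_is_restricted() flags.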
64235diff --git a/fs/proc/inode.c b/fs/proc/inode.c
64236index 124fc43..8afbb02 100644
64237--- a/fs/proc/inode.c
64238+++ b/fs/proc/inode.c
64239@@ -23,11 +23,17 @@
64240 #include <linux/slab.h>
64241 #include <linux/mount.h>
64242 #include <linux/magic.h>
64243+#include <linux/grsecurity.h>
64244
64245 #include <asm/uaccess.h>
64246
64247 #include "internal.h"
64248
64249+#ifdef CONFIG_PROC_SYSCTL
64250+extern const struct inode_operations proc_sys_inode_operations;
64251+extern const struct inode_operations proc_sys_dir_operations;
64252+#endif
64253+
64254 static void proc_evict_inode(struct inode *inode)
64255 {
64256 struct proc_dir_entry *de;
64257@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
64258 ns = PROC_I(inode)->ns.ns;
64259 if (ns_ops && ns)
64260 ns_ops->put(ns);
64261+
64262+#ifdef CONFIG_PROC_SYSCTL
64263+ if (inode->i_op == &proc_sys_inode_operations ||
64264+ inode->i_op == &proc_sys_dir_operations)
64265+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
64266+#endif
64267+
64268 }
64269
64270 static struct kmem_cache * proc_inode_cachep;
64271@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
64272 if (de->mode) {
64273 inode->i_mode = de->mode;
64274 inode->i_uid = de->uid;
64275+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64276+ inode->i_gid = grsec_proc_gid;
64277+#else
64278 inode->i_gid = de->gid;
64279+#endif
64280 }
64281 if (de->size)
64282 inode->i_size = de->size;
64283diff --git a/fs/proc/internal.h b/fs/proc/internal.h
64284index 651d09a..6a4b495 100644
64285--- a/fs/proc/internal.h
64286+++ b/fs/proc/internal.h
64287@@ -46,9 +46,10 @@ struct proc_dir_entry {
64288 struct completion *pde_unload_completion;
64289 struct list_head pde_openers; /* who did ->open, but not ->release */
64290 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
64291+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
64292 u8 namelen;
64293 char name[];
64294-};
64295+} __randomize_layout;
64296
64297 union proc_op {
64298 int (*proc_get_link)(struct dentry *, struct path *);
64299@@ -67,7 +68,7 @@ struct proc_inode {
64300 struct ctl_table *sysctl_entry;
64301 struct proc_ns ns;
64302 struct inode vfs_inode;
64303-};
64304+} __randomize_layout;
64305
64306 /*
64307 * General functions
64308@@ -155,6 +156,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
64309 struct pid *, struct task_struct *);
64310 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
64311 struct pid *, struct task_struct *);
64312+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
64313+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
64314+#endif
64315
64316 /*
64317 * base.c
64318@@ -181,9 +185,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
64319 extern spinlock_t proc_subdir_lock;
64320
64321 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
64322+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
64323 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
64324 struct dentry *);
64325 extern int proc_readdir(struct file *, struct dir_context *);
64326+extern int proc_readdir_restrict(struct file *, struct dir_context *);
64327 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
64328
64329 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
64330diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
64331index a352d57..cb94a5c 100644
64332--- a/fs/proc/interrupts.c
64333+++ b/fs/proc/interrupts.c
64334@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
64335
64336 static int __init proc_interrupts_init(void)
64337 {
64338+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64339+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
64340+#else
64341 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
64342+#endif
64343 return 0;
64344 }
64345 fs_initcall(proc_interrupts_init);
64346diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
64347index 39e6ef3..2f9cb5e 100644
64348--- a/fs/proc/kcore.c
64349+++ b/fs/proc/kcore.c
64350@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
64351 * the addresses in the elf_phdr on our list.
64352 */
64353 start = kc_offset_to_vaddr(*fpos - elf_buflen);
64354- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
64355+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
64356+ if (tsz > buflen)
64357 tsz = buflen;
64358-
64359+
64360 while (buflen) {
64361 struct kcore_list *m;
64362
64363@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
64364 kfree(elf_buf);
64365 } else {
64366 if (kern_addr_valid(start)) {
64367- unsigned long n;
64368+ char *elf_buf;
64369+ mm_segment_t oldfs;
64370
64371- n = copy_to_user(buffer, (char *)start, tsz);
64372- /*
64373- * We cannot distinguish between fault on source
64374- * and fault on destination. When this happens
64375- * we clear too and hope it will trigger the
64376- * EFAULT again.
64377- */
64378- if (n) {
64379- if (clear_user(buffer + tsz - n,
64380- n))
64381+ elf_buf = kmalloc(tsz, GFP_KERNEL);
64382+ if (!elf_buf)
64383+ return -ENOMEM;
64384+ oldfs = get_fs();
64385+ set_fs(KERNEL_DS);
64386+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
64387+ set_fs(oldfs);
64388+ if (copy_to_user(buffer, elf_buf, tsz)) {
64389+ kfree(elf_buf);
64390 return -EFAULT;
64391+ }
64392 }
64393+ set_fs(oldfs);
64394+ kfree(elf_buf);
64395 } else {
64396 if (clear_user(buffer, tsz))
64397 return -EFAULT;
64398@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
64399
64400 static int open_kcore(struct inode *inode, struct file *filp)
64401 {
64402+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64403+ return -EPERM;
64404+#endif
64405 if (!capable(CAP_SYS_RAWIO))
64406 return -EPERM;
64407 if (kcore_need_update)
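
[Editor's note] The rewritten read_kcore() above replaces a direct copy_to_user() from a kernel address with a two-step copy through a kmalloc'd bounce buffer, so a fault on the kernel-side source can no longer be confused with a fault on the user-side destination (the ambiguity the deleted comment apologized for). A condensed sketch of the pattern, with error handling trimmed to the essentials:

char *bounce = kmalloc(tsz, GFP_KERNEL);
mm_segment_t oldfs;

if (!bounce)
	return -ENOMEM;
oldfs = get_fs();
set_fs(KERNEL_DS);	/* let __copy_from_user() read a kernel address */
if (!__copy_from_user(bounce, (const void __user *)start, tsz)) {
	set_fs(oldfs);
	if (copy_to_user(buffer, bounce, tsz)) {	/* fault = user's buffer */
		kfree(bounce);
		return -EFAULT;
	}
} else
	set_fs(oldfs);	/* source faulted; nothing is copied out */
kfree(bounce);

The cost is an extra allocation and copy per chunk, which is acceptable for a debugging interface like /proc/kcore.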
64408diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
64409index 136e548..1f88922 100644
64410--- a/fs/proc/meminfo.c
64411+++ b/fs/proc/meminfo.c
64412@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
64413 vmi.used >> 10,
64414 vmi.largest_chunk >> 10
64415 #ifdef CONFIG_MEMORY_FAILURE
64416- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
64417+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
64418 #endif
64419 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
64420 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
64421diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
64422index d4a3574..b421ce9 100644
64423--- a/fs/proc/nommu.c
64424+++ b/fs/proc/nommu.c
64425@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
64426
64427 if (file) {
64428 seq_pad(m, ' ');
64429- seq_path(m, &file->f_path, "");
64430+ seq_path(m, &file->f_path, "\n\\");
64431 }
64432
64433 seq_putc(m, '\n');
64434diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
64435index 4677bb7..94067cd 100644
64436--- a/fs/proc/proc_net.c
64437+++ b/fs/proc/proc_net.c
64438@@ -23,6 +23,7 @@
64439 #include <linux/nsproxy.h>
64440 #include <net/net_namespace.h>
64441 #include <linux/seq_file.h>
64442+#include <linux/grsecurity.h>
64443
64444 #include "internal.h"
64445
64446@@ -36,6 +37,8 @@ static struct net *get_proc_net(const struct inode *inode)
64447 return maybe_get_net(PDE_NET(PDE(inode)));
64448 }
64449
64450+extern const struct seq_operations dev_seq_ops;
64451+
64452 int seq_open_net(struct inode *ino, struct file *f,
64453 const struct seq_operations *ops, int size)
64454 {
64455@@ -44,6 +47,10 @@ int seq_open_net(struct inode *ino, struct file *f,
64456
64457 BUG_ON(size < sizeof(*p));
64458
64459+ /* only permit access to /proc/net/dev */
64460+ if (ops != &dev_seq_ops && gr_proc_is_restricted())
64461+ return -EACCES;
64462+
64463 net = get_proc_net(ino);
64464 if (net == NULL)
64465 return -ENXIO;
64466@@ -66,6 +73,9 @@ int single_open_net(struct inode *inode, struct file *file,
64467 int err;
64468 struct net *net;
64469
64470+ if (gr_proc_is_restricted())
64471+ return -EACCES;
64472+
64473 err = -ENXIO;
64474 net = get_proc_net(inode);
64475 if (net == NULL)
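
[Editor's note] seq_open_net() and single_open_net() sit beneath every seq_file in /proc/net, so gating them covers the whole directory from two choke points; the only exemption is the seq_operations table behind /proc/net/dev, compared by address so the whitelist is one pointer test:

/* every /proc/net seq_file reader funnels through here; a single
 * pointer comparison exempts /proc/net/dev and nothing else */
if (ops != &dev_seq_ops && gr_proc_is_restricted())
	return -EACCES;

dev_seq_ops must be made non-static elsewhere in this patch for the extern declaration above to link.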
64476diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
64477index 7129046..6914844 100644
64478--- a/fs/proc/proc_sysctl.c
64479+++ b/fs/proc/proc_sysctl.c
64480@@ -11,13 +11,21 @@
64481 #include <linux/namei.h>
64482 #include <linux/mm.h>
64483 #include <linux/module.h>
64484+#include <linux/nsproxy.h>
64485+#ifdef CONFIG_GRKERNSEC
64486+#include <net/net_namespace.h>
64487+#endif
64488 #include "internal.h"
64489
64490+extern int gr_handle_chroot_sysctl(const int op);
64491+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
64492+ const int op);
64493+
64494 static const struct dentry_operations proc_sys_dentry_operations;
64495 static const struct file_operations proc_sys_file_operations;
64496-static const struct inode_operations proc_sys_inode_operations;
64497+const struct inode_operations proc_sys_inode_operations;
64498 static const struct file_operations proc_sys_dir_file_operations;
64499-static const struct inode_operations proc_sys_dir_operations;
64500+const struct inode_operations proc_sys_dir_operations;
64501
64502 void proc_sys_poll_notify(struct ctl_table_poll *poll)
64503 {
64504@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
64505
64506 err = NULL;
64507 d_set_d_op(dentry, &proc_sys_dentry_operations);
64508+
64509+ gr_handle_proc_create(dentry, inode);
64510+
64511 d_add(dentry, inode);
64512
64513 out:
64514@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
64515 struct inode *inode = file_inode(filp);
64516 struct ctl_table_header *head = grab_header(inode);
64517 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
64518+ int op = write ? MAY_WRITE : MAY_READ;
64519 ssize_t error;
64520 size_t res;
64521
64522@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
64523 * and won't be until we finish.
64524 */
64525 error = -EPERM;
64526- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
64527+ if (sysctl_perm(head, table, op))
64528 goto out;
64529
64530 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
64531@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
64532 if (!table->proc_handler)
64533 goto out;
64534
64535+#ifdef CONFIG_GRKERNSEC
64536+ error = -EPERM;
64537+ if (gr_handle_chroot_sysctl(op))
64538+ goto out;
64539+ dget(filp->f_path.dentry);
64540+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
64541+ dput(filp->f_path.dentry);
64542+ goto out;
64543+ }
64544+ dput(filp->f_path.dentry);
64545+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
64546+ goto out;
64547+ if (write) {
64548+ if (current->nsproxy->net_ns != table->extra2) {
64549+ if (!capable(CAP_SYS_ADMIN))
64550+ goto out;
64551+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
64552+ goto out;
64553+ }
64554+#endif
64555+
64556 /* careful: calling conventions are nasty here */
64557 res = count;
64558 error = table->proc_handler(table, write, buf, &res, ppos);
64559@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
64560 return false;
64561 } else {
64562 d_set_d_op(child, &proc_sys_dentry_operations);
64563+
64564+ gr_handle_proc_create(child, inode);
64565+
64566 d_add(child, inode);
64567 }
64568 } else {
64569@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
64570 if ((*pos)++ < ctx->pos)
64571 return true;
64572
64573+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
64574+ return 0;
64575+
64576 if (unlikely(S_ISLNK(table->mode)))
64577 res = proc_sys_link_fill_cache(file, ctx, head, table);
64578 else
64579@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
64580 if (IS_ERR(head))
64581 return PTR_ERR(head);
64582
64583+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
64584+ return -ENOENT;
64585+
64586 generic_fillattr(inode, stat);
64587 if (table)
64588 stat->mode = (stat->mode & S_IFMT) | table->mode;
64589@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
64590 .llseek = generic_file_llseek,
64591 };
64592
64593-static const struct inode_operations proc_sys_inode_operations = {
64594+const struct inode_operations proc_sys_inode_operations = {
64595 .permission = proc_sys_permission,
64596 .setattr = proc_sys_setattr,
64597 .getattr = proc_sys_getattr,
64598 };
64599
64600-static const struct inode_operations proc_sys_dir_operations = {
64601+const struct inode_operations proc_sys_dir_operations = {
64602 .lookup = proc_sys_lookup,
64603 .permission = proc_sys_permission,
64604 .setattr = proc_sys_setattr,
64605@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
64606 static struct ctl_dir *new_dir(struct ctl_table_set *set,
64607 const char *name, int namelen)
64608 {
64609- struct ctl_table *table;
64610+ ctl_table_no_const *table;
64611 struct ctl_dir *new;
64612 struct ctl_node *node;
64613 char *new_name;
64614@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
64615 return NULL;
64616
64617 node = (struct ctl_node *)(new + 1);
64618- table = (struct ctl_table *)(node + 1);
64619+ table = (ctl_table_no_const *)(node + 1);
64620 new_name = (char *)(table + 2);
64621 memcpy(new_name, name, namelen);
64622 new_name[namelen] = '\0';
64623@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
64624 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
64625 struct ctl_table_root *link_root)
64626 {
64627- struct ctl_table *link_table, *entry, *link;
64628+ ctl_table_no_const *link_table, *link;
64629+ struct ctl_table *entry;
64630 struct ctl_table_header *links;
64631 struct ctl_node *node;
64632 char *link_name;
64633@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
64634 return NULL;
64635
64636 node = (struct ctl_node *)(links + 1);
64637- link_table = (struct ctl_table *)(node + nr_entries);
64638+ link_table = (ctl_table_no_const *)(node + nr_entries);
64639 link_name = (char *)&link_table[nr_entries + 1];
64640
64641 for (link = link_table, entry = table; entry->procname; link++, entry++) {
64642@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
64643 struct ctl_table_header ***subheader, struct ctl_table_set *set,
64644 struct ctl_table *table)
64645 {
64646- struct ctl_table *ctl_table_arg = NULL;
64647- struct ctl_table *entry, *files;
64648+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
64649+ struct ctl_table *entry;
64650 int nr_files = 0;
64651 int nr_dirs = 0;
64652 int err = -ENOMEM;
64653@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
64654 nr_files++;
64655 }
64656
64657- files = table;
64658 /* If there are mixed files and directories we need a new table */
64659 if (nr_dirs && nr_files) {
64660- struct ctl_table *new;
64661+ ctl_table_no_const *new;
64662 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
64663 GFP_KERNEL);
64664 if (!files)
64665@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
64666 /* Register everything except a directory full of subdirectories */
64667 if (nr_files || !nr_dirs) {
64668 struct ctl_table_header *header;
64669- header = __register_sysctl_table(set, path, files);
64670+ header = __register_sysctl_table(set, path, files ? files : table);
64671 if (!header) {
64672 kfree(ctl_table_arg);
64673 goto out;
64674diff --git a/fs/proc/root.c b/fs/proc/root.c
64675index 87dbcbe..55e1b4d 100644
64676--- a/fs/proc/root.c
64677+++ b/fs/proc/root.c
64678@@ -186,7 +186,15 @@ void __init proc_root_init(void)
64679 #ifdef CONFIG_PROC_DEVICETREE
64680 proc_device_tree_init();
64681 #endif
64682+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64683+#ifdef CONFIG_GRKERNSEC_PROC_USER
64684+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
64685+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64686+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
64687+#endif
64688+#else
64689 proc_mkdir("bus", NULL);
64690+#endif
64691 proc_sys_init();
64692 }
64693
64694diff --git a/fs/proc/stat.c b/fs/proc/stat.c
64695index 6f599c6..bd00271 100644
64696--- a/fs/proc/stat.c
64697+++ b/fs/proc/stat.c
64698@@ -11,6 +11,7 @@
64699 #include <linux/irqnr.h>
64700 #include <asm/cputime.h>
64701 #include <linux/tick.h>
64702+#include <linux/grsecurity.h>
64703
64704 #ifndef arch_irq_stat_cpu
64705 #define arch_irq_stat_cpu(cpu) 0
64706@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
64707 u64 sum_softirq = 0;
64708 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
64709 struct timespec boottime;
64710+ int unrestricted = 1;
64711+
64712+#ifdef CONFIG_GRKERNSEC_PROC_ADD
64713+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64714+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
64715+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64716+ && !in_group_p(grsec_proc_gid)
64717+#endif
64718+ )
64719+ unrestricted = 0;
64720+#endif
64721+#endif
64722
64723 user = nice = system = idle = iowait =
64724 irq = softirq = steal = 0;
64725@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
64726 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
64727 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
64728 idle += get_idle_time(i);
64729- iowait += get_iowait_time(i);
64730- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
64731- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
64732- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
64733- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
64734- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
64735- sum += kstat_cpu_irqs_sum(i);
64736- sum += arch_irq_stat_cpu(i);
64737+ if (unrestricted) {
64738+ iowait += get_iowait_time(i);
64739+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
64740+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
64741+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
64742+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
64743+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
64744+ sum += kstat_cpu_irqs_sum(i);
64745+ sum += arch_irq_stat_cpu(i);
64746+ for (j = 0; j < NR_SOFTIRQS; j++) {
64747+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
64748
64749- for (j = 0; j < NR_SOFTIRQS; j++) {
64750- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
64751-
64752- per_softirq_sums[j] += softirq_stat;
64753- sum_softirq += softirq_stat;
64754+ per_softirq_sums[j] += softirq_stat;
64755+ sum_softirq += softirq_stat;
64756+ }
64757 }
64758 }
64759- sum += arch_irq_stat();
64760+ if (unrestricted)
64761+ sum += arch_irq_stat();
64762
64763 seq_puts(p, "cpu ");
64764 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
64765@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
64766 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
64767 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
64768 idle = get_idle_time(i);
64769- iowait = get_iowait_time(i);
64770- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
64771- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
64772- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
64773- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
64774- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
64775+ if (unrestricted) {
64776+ iowait = get_iowait_time(i);
64777+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
64778+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
64779+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
64780+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
64781+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
64782+ }
64783 seq_printf(p, "cpu%d", i);
64784 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
64785 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
64786@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
64787
64788 /* sum again ? it could be updated? */
64789 for_each_irq_nr(j)
64790- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
64791+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
64792
64793 seq_printf(p,
64794 "\nctxt %llu\n"
64795@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
64796 "processes %lu\n"
64797 "procs_running %lu\n"
64798 "procs_blocked %lu\n",
64799- nr_context_switches(),
64800+ unrestricted ? nr_context_switches() : 0ULL,
64801 (unsigned long)jif,
64802- total_forks,
64803- nr_running(),
64804- nr_iowait());
64805+ unrestricted ? total_forks : 0UL,
64806+ unrestricted ? nr_running() : 0UL,
64807+ unrestricted ? nr_iowait() : 0UL);
64808
64809 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
64810
64811diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
64812index fb52b54..5fc7c14 100644
64813--- a/fs/proc/task_mmu.c
64814+++ b/fs/proc/task_mmu.c
64815@@ -12,12 +12,19 @@
64816 #include <linux/swap.h>
64817 #include <linux/swapops.h>
64818 #include <linux/mmu_notifier.h>
64819+#include <linux/grsecurity.h>
64820
64821 #include <asm/elf.h>
64822 #include <asm/uaccess.h>
64823 #include <asm/tlbflush.h>
64824 #include "internal.h"
64825
64826+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64827+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
64828+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
64829+ _mm->pax_flags & MF_PAX_SEGMEXEC))
64830+#endif
64831+
64832 void task_mem(struct seq_file *m, struct mm_struct *mm)
64833 {
64834 unsigned long data, text, lib, swap;
64835@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
64836 "VmExe:\t%8lu kB\n"
64837 "VmLib:\t%8lu kB\n"
64838 "VmPTE:\t%8lu kB\n"
64839- "VmSwap:\t%8lu kB\n",
64840- hiwater_vm << (PAGE_SHIFT-10),
64841+ "VmSwap:\t%8lu kB\n"
64842+
64843+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64844+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
64845+#endif
64846+
64847+ ,hiwater_vm << (PAGE_SHIFT-10),
64848 total_vm << (PAGE_SHIFT-10),
64849 mm->locked_vm << (PAGE_SHIFT-10),
64850 mm->pinned_vm << (PAGE_SHIFT-10),
64851@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
64852 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
64853 (PTRS_PER_PTE * sizeof(pte_t) *
64854 atomic_long_read(&mm->nr_ptes)) >> 10,
64855- swap << (PAGE_SHIFT-10));
64856+ swap << (PAGE_SHIFT-10)
64857+
64858+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64859+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64860+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
64861+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
64862+#else
64863+ , mm->context.user_cs_base
64864+ , mm->context.user_cs_limit
64865+#endif
64866+#endif
64867+
64868+ );
64869 }
64870
64871 unsigned long task_vsize(struct mm_struct *mm)
64872@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
64873 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
64874 }
64875
64876- /* We don't show the stack guard page in /proc/maps */
64877+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64878+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
64879+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
64880+#else
64881 start = vma->vm_start;
64882- if (stack_guard_page_start(vma, start))
64883- start += PAGE_SIZE;
64884 end = vma->vm_end;
64885- if (stack_guard_page_end(vma, end))
64886- end -= PAGE_SIZE;
64887+#endif
64888
64889 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
64890 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
64891@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
64892 flags & VM_WRITE ? 'w' : '-',
64893 flags & VM_EXEC ? 'x' : '-',
64894 flags & VM_MAYSHARE ? 's' : 'p',
64895+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64896+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
64897+#else
64898 pgoff,
64899+#endif
64900 MAJOR(dev), MINOR(dev), ino);
64901
64902 /*
64903@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
64904 */
64905 if (file) {
64906 seq_pad(m, ' ');
64907- seq_path(m, &file->f_path, "\n");
64908+ seq_path(m, &file->f_path, "\n\\");
64909 goto done;
64910 }
64911
64912@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
64913 * Thread stack in /proc/PID/task/TID/maps or
64914 * the main process stack.
64915 */
64916- if (!is_pid || (vma->vm_start <= mm->start_stack &&
64917- vma->vm_end >= mm->start_stack)) {
64918+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
64919+ (vma->vm_start <= mm->start_stack &&
64920+ vma->vm_end >= mm->start_stack)) {
64921 name = "[stack]";
64922 } else {
64923 /* Thread stack in /proc/PID/maps */
64924@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
64925 struct proc_maps_private *priv = m->private;
64926 struct task_struct *task = priv->task;
64927
64928+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64929+ if (current->exec_id != m->exec_id) {
64930+ gr_log_badprocpid("maps");
64931+ return 0;
64932+ }
64933+#endif
64934+
64935 show_map_vma(m, vma, is_pid);
64936
64937 if (m->count < m->size) /* vma is copied successfully */
64938@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
64939 .private = &mss,
64940 };
64941
64942+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64943+ if (current->exec_id != m->exec_id) {
64944+ gr_log_badprocpid("smaps");
64945+ return 0;
64946+ }
64947+#endif
64948 memset(&mss, 0, sizeof mss);
64949- mss.vma = vma;
64950- /* mmap_sem is held in m_start */
64951- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
64952- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
64953-
64954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64955+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
64956+#endif
64957+ mss.vma = vma;
64958+ /* mmap_sem is held in m_start */
64959+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
64960+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
64961+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64962+ }
64963+#endif
64964 show_map_vma(m, vma, is_pid);
64965
64966 seq_printf(m,
64967@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
64968 "KernelPageSize: %8lu kB\n"
64969 "MMUPageSize: %8lu kB\n"
64970 "Locked: %8lu kB\n",
64971+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64972+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
64973+#else
64974 (vma->vm_end - vma->vm_start) >> 10,
64975+#endif
64976 mss.resident >> 10,
64977 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
64978 mss.shared_clean >> 10,
64979@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
64980 char buffer[64];
64981 int nid;
64982
64983+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64984+ if (current->exec_id != m->exec_id) {
64985+ gr_log_badprocpid("numa_maps");
64986+ return 0;
64987+ }
64988+#endif
64989+
64990 if (!mm)
64991 return 0;
64992
64993@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
64994 mpol_to_str(buffer, sizeof(buffer), pol);
64995 mpol_cond_put(pol);
64996
64997+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64998+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
64999+#else
65000 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
65001+#endif
65002
65003 if (file) {
65004 seq_printf(m, " file=");
65005- seq_path(m, &file->f_path, "\n\t= ");
65006+ seq_path(m, &file->f_path, "\n\t\\= ");
65007 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
65008 seq_printf(m, " heap");
65009 } else {
65010diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
65011index 678455d..ebd3245 100644
65012--- a/fs/proc/task_nommu.c
65013+++ b/fs/proc/task_nommu.c
65014@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
65015 else
65016 bytes += kobjsize(mm);
65017
65018- if (current->fs && current->fs->users > 1)
65019+ if (current->fs && atomic_read(&current->fs->users) > 1)
65020 sbytes += kobjsize(current->fs);
65021 else
65022 bytes += kobjsize(current->fs);
65023@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
65024
65025 if (file) {
65026 seq_pad(m, ' ');
65027- seq_path(m, &file->f_path, "");
65028+ seq_path(m, &file->f_path, "\n\\");
65029 } else if (mm) {
65030 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
65031
65032diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
65033index 88d4585..c10bfeb 100644
65034--- a/fs/proc/vmcore.c
65035+++ b/fs/proc/vmcore.c
65036@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
65037 nr_bytes = count;
65038
65039 /* If pfn is not ram, return zeros for sparse dump files */
65040- if (pfn_is_ram(pfn) == 0)
65041- memset(buf, 0, nr_bytes);
65042- else {
65043+ if (pfn_is_ram(pfn) == 0) {
65044+ if (userbuf) {
65045+ if (clear_user((char __force_user *)buf, nr_bytes))
65046+ return -EFAULT;
65047+ } else
65048+ memset(buf, 0, nr_bytes);
65049+ } else {
65050 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
65051 offset, userbuf);
65052 if (tmp < 0)
65053@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
65054 static int copy_to(void *target, void *src, size_t size, int userbuf)
65055 {
65056 if (userbuf) {
65057- if (copy_to_user((char __user *) target, src, size))
65058+ if (copy_to_user((char __force_user *) target, src, size))
65059 return -EFAULT;
65060 } else {
65061 memcpy(target, src, size);
65062@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
65063 if (*fpos < m->offset + m->size) {
65064 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
65065 start = m->paddr + *fpos - m->offset;
65066- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
65067+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
65068 if (tmp < 0)
65069 return tmp;
65070 buflen -= tsz;
65071@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
65072 static ssize_t read_vmcore(struct file *file, char __user *buffer,
65073 size_t buflen, loff_t *fpos)
65074 {
65075- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
65076+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
65077 }
65078
65079 /*
65080diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
65081index b00fcc9..e0c6381 100644
65082--- a/fs/qnx6/qnx6.h
65083+++ b/fs/qnx6/qnx6.h
65084@@ -74,7 +74,7 @@ enum {
65085 BYTESEX_BE,
65086 };
65087
65088-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
65089+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
65090 {
65091 if (sbi->s_bytesex == BYTESEX_LE)
65092 return le64_to_cpu((__force __le64)n);
65093@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
65094 return (__force __fs64)cpu_to_be64(n);
65095 }
65096
65097-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
65098+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
65099 {
65100 if (sbi->s_bytesex == BYTESEX_LE)
65101 return le32_to_cpu((__force __le32)n);
65102diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
65103index 72d2917..c917c12 100644
65104--- a/fs/quota/netlink.c
65105+++ b/fs/quota/netlink.c
65106@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
65107 void quota_send_warning(struct kqid qid, dev_t dev,
65108 const char warntype)
65109 {
65110- static atomic_t seq;
65111+ static atomic_unchecked_t seq;
65112 struct sk_buff *skb;
65113 void *msg_head;
65114 int ret;
65115@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
65116 "VFS: Not enough memory to send quota warning.\n");
65117 return;
65118 }
65119- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
65120+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
65121 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
65122 if (!msg_head) {
65123 printk(KERN_ERR
65124diff --git a/fs/read_write.c b/fs/read_write.c
65125index 28cc9c8..208e4fb 100644
65126--- a/fs/read_write.c
65127+++ b/fs/read_write.c
65128@@ -450,7 +450,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
65129
65130 old_fs = get_fs();
65131 set_fs(get_ds());
65132- p = (__force const char __user *)buf;
65133+ p = (const char __force_user *)buf;
65134 if (count > MAX_RW_COUNT)
65135 count = MAX_RW_COUNT;
65136 if (file->f_op->write)
65137diff --git a/fs/readdir.c b/fs/readdir.c
65138index 5b53d99..a6c3049 100644
65139--- a/fs/readdir.c
65140+++ b/fs/readdir.c
65141@@ -17,6 +17,7 @@
65142 #include <linux/security.h>
65143 #include <linux/syscalls.h>
65144 #include <linux/unistd.h>
65145+#include <linux/namei.h>
65146
65147 #include <asm/uaccess.h>
65148
65149@@ -69,6 +70,7 @@ struct old_linux_dirent {
65150 struct readdir_callback {
65151 struct dir_context ctx;
65152 struct old_linux_dirent __user * dirent;
65153+ struct file * file;
65154 int result;
65155 };
65156
65157@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
65158 buf->result = -EOVERFLOW;
65159 return -EOVERFLOW;
65160 }
65161+
65162+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
65163+ return 0;
65164+
65165 buf->result++;
65166 dirent = buf->dirent;
65167 if (!access_ok(VERIFY_WRITE, dirent,
65168@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
65169 if (!f.file)
65170 return -EBADF;
65171
65172+ buf.file = f.file;
65173 error = iterate_dir(f.file, &buf.ctx);
65174 if (buf.result)
65175 error = buf.result;
65176@@ -142,6 +149,7 @@ struct getdents_callback {
65177 struct dir_context ctx;
65178 struct linux_dirent __user * current_dir;
65179 struct linux_dirent __user * previous;
65180+ struct file * file;
65181 int count;
65182 int error;
65183 };
65184@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
65185 buf->error = -EOVERFLOW;
65186 return -EOVERFLOW;
65187 }
65188+
65189+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
65190+ return 0;
65191+
65192 dirent = buf->previous;
65193 if (dirent) {
65194 if (__put_user(offset, &dirent->d_off))
65195@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
65196 if (!f.file)
65197 return -EBADF;
65198
65199+ buf.file = f.file;
65200 error = iterate_dir(f.file, &buf.ctx);
65201 if (error >= 0)
65202 error = buf.error;
65203@@ -226,6 +239,7 @@ struct getdents_callback64 {
65204 struct dir_context ctx;
65205 struct linux_dirent64 __user * current_dir;
65206 struct linux_dirent64 __user * previous;
65207+ struct file *file;
65208 int count;
65209 int error;
65210 };
65211@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
65212 buf->error = -EINVAL; /* only used if we fail.. */
65213 if (reclen > buf->count)
65214 return -EINVAL;
65215+
65216+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
65217+ return 0;
65218+
65219 dirent = buf->previous;
65220 if (dirent) {
65221 if (__put_user(offset, &dirent->d_off))
65222@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
65223 if (!f.file)
65224 return -EBADF;
65225
65226+ buf.file = f.file;
65227 error = iterate_dir(f.file, &buf.ctx);
65228 if (error >= 0)
65229 error = buf.error;
65230diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
65231index 9a3c68c..767933e 100644
65232--- a/fs/reiserfs/do_balan.c
65233+++ b/fs/reiserfs/do_balan.c
65234@@ -1546,7 +1546,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
65235 return;
65236 }
65237
65238- atomic_inc(&(fs_generation(tb->tb_sb)));
65239+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
65240 do_balance_starts(tb);
65241
65242 /* balance leaf returns 0 except if combining L R and S into
65243diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
65244index ee382ef..f4eb6eb5 100644
65245--- a/fs/reiserfs/item_ops.c
65246+++ b/fs/reiserfs/item_ops.c
65247@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
65248 }
65249
65250 static struct item_operations errcatch_ops = {
65251- errcatch_bytes_number,
65252- errcatch_decrement_key,
65253- errcatch_is_left_mergeable,
65254- errcatch_print_item,
65255- errcatch_check_item,
65256+ .bytes_number = errcatch_bytes_number,
65257+ .decrement_key = errcatch_decrement_key,
65258+ .is_left_mergeable = errcatch_is_left_mergeable,
65259+ .print_item = errcatch_print_item,
65260+ .check_item = errcatch_check_item,
65261
65262- errcatch_create_vi,
65263- errcatch_check_left,
65264- errcatch_check_right,
65265- errcatch_part_size,
65266- errcatch_unit_num,
65267- errcatch_print_vi
65268+ .create_vi = errcatch_create_vi,
65269+ .check_left = errcatch_check_left,
65270+ .check_right = errcatch_check_right,
65271+ .part_size = errcatch_part_size,
65272+ .unit_num = errcatch_unit_num,
65273+ .print_vi = errcatch_print_vi
65274 };
65275
65276 //////////////////////////////////////////////////////////////////////////////
65277diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
65278index 02b0b7d..c85018b 100644
65279--- a/fs/reiserfs/procfs.c
65280+++ b/fs/reiserfs/procfs.c
65281@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
65282 "SMALL_TAILS " : "NO_TAILS ",
65283 replay_only(sb) ? "REPLAY_ONLY " : "",
65284 convert_reiserfs(sb) ? "CONV " : "",
65285- atomic_read(&r->s_generation_counter),
65286+ atomic_read_unchecked(&r->s_generation_counter),
65287 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
65288 SF(s_do_balance), SF(s_unneeded_left_neighbor),
65289 SF(s_good_search_by_key_reada), SF(s_bmaps),
65290diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
65291index 8d06adf..7e1c9f8 100644
65292--- a/fs/reiserfs/reiserfs.h
65293+++ b/fs/reiserfs/reiserfs.h
65294@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
65295 /* Comment? -Hans */
65296 wait_queue_head_t s_wait;
65297 /* To be obsoleted soon by per buffer seals.. -Hans */
65298- atomic_t s_generation_counter; // increased by one every time the
65299+ atomic_unchecked_t s_generation_counter; // increased by one every time the
65300 // tree gets re-balanced
65301 unsigned long s_properties; /* File system properties. Currently holds
65302 on-disk FS format */
65303@@ -1972,7 +1972,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
65304 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
65305
65306 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
65307-#define get_generation(s) atomic_read (&fs_generation(s))
65308+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
65309 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
65310 #define __fs_changed(gen,s) (gen != get_generation (s))
65311 #define fs_changed(gen,s) \
65312diff --git a/fs/select.c b/fs/select.c
65313index 467bb1c..cf9d65a 100644
65314--- a/fs/select.c
65315+++ b/fs/select.c
65316@@ -20,6 +20,7 @@
65317 #include <linux/export.h>
65318 #include <linux/slab.h>
65319 #include <linux/poll.h>
65320+#include <linux/security.h>
65321 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
65322 #include <linux/file.h>
65323 #include <linux/fdtable.h>
65324@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
65325 struct poll_list *walk = head;
65326 unsigned long todo = nfds;
65327
65328+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
65329 if (nfds > rlimit(RLIMIT_NOFILE))
65330 return -EINVAL;
65331
65332diff --git a/fs/seq_file.c b/fs/seq_file.c
65333index 1d641bb..c2f4743 100644
65334--- a/fs/seq_file.c
65335+++ b/fs/seq_file.c
65336@@ -10,6 +10,8 @@
65337 #include <linux/seq_file.h>
65338 #include <linux/slab.h>
65339 #include <linux/cred.h>
65340+#include <linux/sched.h>
65341+#include <linux/grsecurity.h>
65342
65343 #include <asm/uaccess.h>
65344 #include <asm/page.h>
65345@@ -60,6 +62,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
65346 #ifdef CONFIG_USER_NS
65347 p->user_ns = file->f_cred->user_ns;
65348 #endif
65349+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65350+ p->exec_id = current->exec_id;
65351+#endif
65352
65353 /*
65354 * Wrappers around seq_open(e.g. swaps_open) need to be
65355@@ -82,6 +87,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
65356 }
65357 EXPORT_SYMBOL(seq_open);
65358
65359+
65360+int seq_open_restrict(struct file *file, const struct seq_operations *op)
65361+{
65362+ if (gr_proc_is_restricted())
65363+ return -EACCES;
65364+
65365+ return seq_open(file, op);
65366+}
65367+EXPORT_SYMBOL(seq_open_restrict);
65368+
65369 static int traverse(struct seq_file *m, loff_t offset)
65370 {
65371 loff_t pos = 0, index;
65372@@ -96,7 +111,7 @@ static int traverse(struct seq_file *m, loff_t offset)
65373 return 0;
65374 }
65375 if (!m->buf) {
65376- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
65377+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
65378 if (!m->buf)
65379 return -ENOMEM;
65380 }
65381@@ -137,7 +152,7 @@ Eoverflow:
65382 m->op->stop(m, p);
65383 kfree(m->buf);
65384 m->count = 0;
65385- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
65386+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
65387 return !m->buf ? -ENOMEM : -EAGAIN;
65388 }
65389
65390@@ -153,7 +168,7 @@ Eoverflow:
65391 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
65392 {
65393 struct seq_file *m = file->private_data;
65394- size_t copied = 0;
65395+ ssize_t copied = 0;
65396 loff_t pos;
65397 size_t n;
65398 void *p;
65399@@ -192,7 +207,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
65400
65401 /* grab buffer if we didn't have one */
65402 if (!m->buf) {
65403- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
65404+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
65405 if (!m->buf)
65406 goto Enomem;
65407 }
65408@@ -234,7 +249,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
65409 m->op->stop(m, p);
65410 kfree(m->buf);
65411 m->count = 0;
65412- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
65413+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
65414 if (!m->buf)
65415 goto Enomem;
65416 m->version = 0;
65417@@ -584,7 +599,7 @@ static void single_stop(struct seq_file *p, void *v)
65418 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
65419 void *data)
65420 {
65421- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
65422+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
65423 int res = -ENOMEM;
65424
65425 if (op) {
65426@@ -620,6 +635,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
65427 }
65428 EXPORT_SYMBOL(single_open_size);
65429
65430+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
65431+ void *data)
65432+{
65433+ if (gr_proc_is_restricted())
65434+ return -EACCES;
65435+
65436+ return single_open(file, show, data);
65437+}
65438+EXPORT_SYMBOL(single_open_restrict);
65439+
65440+
65441 int single_release(struct inode *inode, struct file *file)
65442 {
65443 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
65444diff --git a/fs/splice.c b/fs/splice.c
65445index 12028fa..a6f2619 100644
65446--- a/fs/splice.c
65447+++ b/fs/splice.c
65448@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
65449 pipe_lock(pipe);
65450
65451 for (;;) {
65452- if (!pipe->readers) {
65453+ if (!atomic_read(&pipe->readers)) {
65454 send_sig(SIGPIPE, current, 0);
65455 if (!ret)
65456 ret = -EPIPE;
65457@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
65458 page_nr++;
65459 ret += buf->len;
65460
65461- if (pipe->files)
65462+ if (atomic_read(&pipe->files))
65463 do_wakeup = 1;
65464
65465 if (!--spd->nr_pages)
65466@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
65467 do_wakeup = 0;
65468 }
65469
65470- pipe->waiting_writers++;
65471+ atomic_inc(&pipe->waiting_writers);
65472 pipe_wait(pipe);
65473- pipe->waiting_writers--;
65474+ atomic_dec(&pipe->waiting_writers);
65475 }
65476
65477 pipe_unlock(pipe);
65478@@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
65479 old_fs = get_fs();
65480 set_fs(get_ds());
65481 /* The cast to a user pointer is valid due to the set_fs() */
65482- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
65483+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
65484 set_fs(old_fs);
65485
65486 return res;
65487@@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
65488 old_fs = get_fs();
65489 set_fs(get_ds());
65490 /* The cast to a user pointer is valid due to the set_fs() */
65491- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
65492+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
65493 set_fs(old_fs);
65494
65495 return res;
65496@@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
65497 goto err;
65498
65499 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
65500- vec[i].iov_base = (void __user *) page_address(page);
65501+ vec[i].iov_base = (void __force_user *) page_address(page);
65502 vec[i].iov_len = this_len;
65503 spd.pages[i] = page;
65504 spd.nr_pages++;
65505@@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
65506 ops->release(pipe, buf);
65507 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
65508 pipe->nrbufs--;
65509- if (pipe->files)
65510+ if (atomic_read(&pipe->files))
65511 sd->need_wakeup = true;
65512 }
65513
65514@@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
65515 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
65516 {
65517 while (!pipe->nrbufs) {
65518- if (!pipe->writers)
65519+ if (!atomic_read(&pipe->writers))
65520 return 0;
65521
65522- if (!pipe->waiting_writers && sd->num_spliced)
65523+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
65524 return 0;
65525
65526 if (sd->flags & SPLICE_F_NONBLOCK)
65527@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
65528 * out of the pipe right after the splice_to_pipe(). So set
65529 * PIPE_READERS appropriately.
65530 */
65531- pipe->readers = 1;
65532+ atomic_set(&pipe->readers, 1);
65533
65534 current->splice_pipe = pipe;
65535 }
65536@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
65537
65538 partial[buffers].offset = off;
65539 partial[buffers].len = plen;
65540+ partial[buffers].private = 0;
65541
65542 off = 0;
65543 len -= plen;
65544@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
65545 ret = -ERESTARTSYS;
65546 break;
65547 }
65548- if (!pipe->writers)
65549+ if (!atomic_read(&pipe->writers))
65550 break;
65551- if (!pipe->waiting_writers) {
65552+ if (!atomic_read(&pipe->waiting_writers)) {
65553 if (flags & SPLICE_F_NONBLOCK) {
65554 ret = -EAGAIN;
65555 break;
65556@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
65557 pipe_lock(pipe);
65558
65559 while (pipe->nrbufs >= pipe->buffers) {
65560- if (!pipe->readers) {
65561+ if (!atomic_read(&pipe->readers)) {
65562 send_sig(SIGPIPE, current, 0);
65563 ret = -EPIPE;
65564 break;
65565@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
65566 ret = -ERESTARTSYS;
65567 break;
65568 }
65569- pipe->waiting_writers++;
65570+ atomic_inc(&pipe->waiting_writers);
65571 pipe_wait(pipe);
65572- pipe->waiting_writers--;
65573+ atomic_dec(&pipe->waiting_writers);
65574 }
65575
65576 pipe_unlock(pipe);
65577@@ -1880,14 +1881,14 @@ retry:
65578 pipe_double_lock(ipipe, opipe);
65579
65580 do {
65581- if (!opipe->readers) {
65582+ if (!atomic_read(&opipe->readers)) {
65583 send_sig(SIGPIPE, current, 0);
65584 if (!ret)
65585 ret = -EPIPE;
65586 break;
65587 }
65588
65589- if (!ipipe->nrbufs && !ipipe->writers)
65590+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
65591 break;
65592
65593 /*
65594@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
65595 pipe_double_lock(ipipe, opipe);
65596
65597 do {
65598- if (!opipe->readers) {
65599+ if (!atomic_read(&opipe->readers)) {
65600 send_sig(SIGPIPE, current, 0);
65601 if (!ret)
65602 ret = -EPIPE;
65603@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
65604 * return EAGAIN if we have the potential of some data in the
65605 * future, otherwise just return 0
65606 */
65607- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
65608+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
65609 ret = -EAGAIN;
65610
65611 pipe_unlock(ipipe);
65612diff --git a/fs/stat.c b/fs/stat.c
65613index ae0c3ce..9ee641c 100644
65614--- a/fs/stat.c
65615+++ b/fs/stat.c
65616@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
65617 stat->gid = inode->i_gid;
65618 stat->rdev = inode->i_rdev;
65619 stat->size = i_size_read(inode);
65620- stat->atime = inode->i_atime;
65621- stat->mtime = inode->i_mtime;
65622+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
65623+ stat->atime = inode->i_ctime;
65624+ stat->mtime = inode->i_ctime;
65625+ } else {
65626+ stat->atime = inode->i_atime;
65627+ stat->mtime = inode->i_mtime;
65628+ }
65629 stat->ctime = inode->i_ctime;
65630 stat->blksize = (1 << inode->i_blkbits);
65631 stat->blocks = inode->i_blocks;
65632@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
65633 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
65634 {
65635 struct inode *inode = path->dentry->d_inode;
65636+ int retval;
65637
65638- if (inode->i_op->getattr)
65639- return inode->i_op->getattr(path->mnt, path->dentry, stat);
65640+ if (inode->i_op->getattr) {
65641+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
65642+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
65643+ stat->atime = stat->ctime;
65644+ stat->mtime = stat->ctime;
65645+ }
65646+ return retval;
65647+ }
65648
65649 generic_fillattr(inode, stat);
65650 return 0;
65651diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
65652index ee0d761..b346c58 100644
65653--- a/fs/sysfs/dir.c
65654+++ b/fs/sysfs/dir.c
65655@@ -62,9 +62,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
65656 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
65657 {
65658 struct kernfs_node *parent, *kn;
65659+ const char *name;
65660+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
65661+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
65662+ const char *parent_name;
65663+#endif
65664
65665 BUG_ON(!kobj);
65666
65667+ name = kobject_name(kobj);
65668+
65669 if (kobj->parent)
65670 parent = kobj->parent->sd;
65671 else
65672@@ -73,11 +80,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
65673 if (!parent)
65674 return -ENOENT;
65675
65676- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
65677- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
65678+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
65679+ parent_name = parent->name;
65680+ mode = S_IRWXU;
65681+
65682+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
65683+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
65684+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
65685+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
65686+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
65687+#endif
65688+
65689+ kn = kernfs_create_dir_ns(parent, name,
65690+ mode, kobj, ns);
65691 if (IS_ERR(kn)) {
65692 if (PTR_ERR(kn) == -EEXIST)
65693- sysfs_warn_dup(parent, kobject_name(kobj));
65694+ sysfs_warn_dup(parent, name);
65695 return PTR_ERR(kn);
65696 }
65697
65698diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
65699index 69d4889..a810bd4 100644
65700--- a/fs/sysv/sysv.h
65701+++ b/fs/sysv/sysv.h
65702@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
65703 #endif
65704 }
65705
65706-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
65707+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
65708 {
65709 if (sbi->s_bytesex == BYTESEX_PDP)
65710 return PDP_swab((__force __u32)n);
65711diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
65712index e18b988..f1d4ad0f 100644
65713--- a/fs/ubifs/io.c
65714+++ b/fs/ubifs/io.c
65715@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
65716 return err;
65717 }
65718
65719-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
65720+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
65721 {
65722 int err;
65723
65724diff --git a/fs/udf/misc.c b/fs/udf/misc.c
65725index c175b4d..8f36a16 100644
65726--- a/fs/udf/misc.c
65727+++ b/fs/udf/misc.c
65728@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
65729
65730 u8 udf_tag_checksum(const struct tag *t)
65731 {
65732- u8 *data = (u8 *)t;
65733+ const u8 *data = (const u8 *)t;
65734 u8 checksum = 0;
65735 int i;
65736 for (i = 0; i < sizeof(struct tag); ++i)
65737diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
65738index 8d974c4..b82f6ec 100644
65739--- a/fs/ufs/swab.h
65740+++ b/fs/ufs/swab.h
65741@@ -22,7 +22,7 @@ enum {
65742 BYTESEX_BE
65743 };
65744
65745-static inline u64
65746+static inline u64 __intentional_overflow(-1)
65747 fs64_to_cpu(struct super_block *sbp, __fs64 n)
65748 {
65749 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
65750@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
65751 return (__force __fs64)cpu_to_be64(n);
65752 }
65753
65754-static inline u32
65755+static inline u32 __intentional_overflow(-1)
65756 fs32_to_cpu(struct super_block *sbp, __fs32 n)
65757 {
65758 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
65759diff --git a/fs/utimes.c b/fs/utimes.c
65760index aa138d6..5f3a811 100644
65761--- a/fs/utimes.c
65762+++ b/fs/utimes.c
65763@@ -1,6 +1,7 @@
65764 #include <linux/compiler.h>
65765 #include <linux/file.h>
65766 #include <linux/fs.h>
65767+#include <linux/security.h>
65768 #include <linux/linkage.h>
65769 #include <linux/mount.h>
65770 #include <linux/namei.h>
65771@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
65772 }
65773 }
65774 retry_deleg:
65775+
65776+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
65777+ error = -EACCES;
65778+ goto mnt_drop_write_and_out;
65779+ }
65780+
65781 mutex_lock(&inode->i_mutex);
65782 error = notify_change(path->dentry, &newattrs, &delegated_inode);
65783 mutex_unlock(&inode->i_mutex);
65784diff --git a/fs/xattr.c b/fs/xattr.c
65785index 3377dff..f394815 100644
65786--- a/fs/xattr.c
65787+++ b/fs/xattr.c
65788@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
65789 return rc;
65790 }
65791
65792+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
65793+ssize_t
65794+pax_getxattr(struct dentry *dentry, void *value, size_t size)
65795+{
65796+ struct inode *inode = dentry->d_inode;
65797+ ssize_t error;
65798+
65799+ error = inode_permission(inode, MAY_EXEC);
65800+ if (error)
65801+ return error;
65802+
65803+ if (inode->i_op->getxattr)
65804+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
65805+ else
65806+ error = -EOPNOTSUPP;
65807+
65808+ return error;
65809+}
65810+EXPORT_SYMBOL(pax_getxattr);
65811+#endif
65812+
65813 ssize_t
65814 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
65815 {
65816@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
65817 * Extended attribute SET operations
65818 */
65819 static long
65820-setxattr(struct dentry *d, const char __user *name, const void __user *value,
65821+setxattr(struct path *path, const char __user *name, const void __user *value,
65822 size_t size, int flags)
65823 {
65824 int error;
65825@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
65826 posix_acl_fix_xattr_from_user(kvalue, size);
65827 }
65828
65829- error = vfs_setxattr(d, kname, kvalue, size, flags);
65830+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
65831+ error = -EACCES;
65832+ goto out;
65833+ }
65834+
65835+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
65836 out:
65837 if (vvalue)
65838 vfree(vvalue);
65839@@ -377,7 +403,7 @@ retry:
65840 return error;
65841 error = mnt_want_write(path.mnt);
65842 if (!error) {
65843- error = setxattr(path.dentry, name, value, size, flags);
65844+ error = setxattr(&path, name, value, size, flags);
65845 mnt_drop_write(path.mnt);
65846 }
65847 path_put(&path);
65848@@ -401,7 +427,7 @@ retry:
65849 return error;
65850 error = mnt_want_write(path.mnt);
65851 if (!error) {
65852- error = setxattr(path.dentry, name, value, size, flags);
65853+ error = setxattr(&path, name, value, size, flags);
65854 mnt_drop_write(path.mnt);
65855 }
65856 path_put(&path);
65857@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
65858 const void __user *,value, size_t, size, int, flags)
65859 {
65860 struct fd f = fdget(fd);
65861- struct dentry *dentry;
65862 int error = -EBADF;
65863
65864 if (!f.file)
65865 return error;
65866- dentry = f.file->f_path.dentry;
65867- audit_inode(NULL, dentry, 0);
65868+ audit_inode(NULL, f.file->f_path.dentry, 0);
65869 error = mnt_want_write_file(f.file);
65870 if (!error) {
65871- error = setxattr(dentry, name, value, size, flags);
65872+ error = setxattr(&f.file->f_path, name, value, size, flags);
65873 mnt_drop_write_file(f.file);
65874 }
65875 fdput(f);
65876@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
65877 * Extended attribute REMOVE operations
65878 */
65879 static long
65880-removexattr(struct dentry *d, const char __user *name)
65881+removexattr(struct path *path, const char __user *name)
65882 {
65883 int error;
65884 char kname[XATTR_NAME_MAX + 1];
65885@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
65886 if (error < 0)
65887 return error;
65888
65889- return vfs_removexattr(d, kname);
65890+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
65891+ return -EACCES;
65892+
65893+ return vfs_removexattr(path->dentry, kname);
65894 }
65895
65896 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
65897@@ -652,7 +679,7 @@ retry:
65898 return error;
65899 error = mnt_want_write(path.mnt);
65900 if (!error) {
65901- error = removexattr(path.dentry, name);
65902+ error = removexattr(&path, name);
65903 mnt_drop_write(path.mnt);
65904 }
65905 path_put(&path);
65906@@ -675,7 +702,7 @@ retry:
65907 return error;
65908 error = mnt_want_write(path.mnt);
65909 if (!error) {
65910- error = removexattr(path.dentry, name);
65911+ error = removexattr(&path, name);
65912 mnt_drop_write(path.mnt);
65913 }
65914 path_put(&path);
65915@@ -689,16 +716,16 @@ retry:
65916 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
65917 {
65918 struct fd f = fdget(fd);
65919- struct dentry *dentry;
65920+ struct path *path;
65921 int error = -EBADF;
65922
65923 if (!f.file)
65924 return error;
65925- dentry = f.file->f_path.dentry;
65926- audit_inode(NULL, dentry, 0);
65927+ path = &f.file->f_path;
65928+ audit_inode(NULL, path->dentry, 0);
65929 error = mnt_want_write_file(f.file);
65930 if (!error) {
65931- error = removexattr(dentry, name);
65932+ error = removexattr(path, name);
65933 mnt_drop_write_file(f.file);
65934 }
65935 fdput(f);
65936diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
65937index 152543c..d80c361 100644
65938--- a/fs/xfs/xfs_bmap.c
65939+++ b/fs/xfs/xfs_bmap.c
65940@@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
65941
65942 #else
65943 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
65944-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
65945+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
65946 #endif /* DEBUG */
65947
65948 /*
65949diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
65950index aead369..0dfecfd 100644
65951--- a/fs/xfs/xfs_dir2_readdir.c
65952+++ b/fs/xfs/xfs_dir2_readdir.c
65953@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
65954 ino = dp->d_ops->sf_get_ino(sfp, sfep);
65955 filetype = dp->d_ops->sf_get_ftype(sfep);
65956 ctx->pos = off & 0x7fffffff;
65957- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
65958+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
65959+ char name[sfep->namelen];
65960+ memcpy(name, sfep->name, sfep->namelen);
65961+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
65962+ return 0;
65963+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
65964 xfs_dir3_get_dtype(mp, filetype)))
65965 return 0;
65966 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
65967diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
65968index bcfe612..aa399c0 100644
65969--- a/fs/xfs/xfs_ioctl.c
65970+++ b/fs/xfs/xfs_ioctl.c
65971@@ -122,7 +122,7 @@ xfs_find_handle(
65972 }
65973
65974 error = -EFAULT;
65975- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
65976+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
65977 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
65978 goto out_put;
65979
65980diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
65981new file mode 100644
65982index 0000000..a14eb52
65983--- /dev/null
65984+++ b/grsecurity/Kconfig
65985@@ -0,0 +1,1174 @@
65986+#
65987+# grsecurity configuration
65988+#
65989+menu "Memory Protections"
65990+depends on GRKERNSEC
65991+
65992+config GRKERNSEC_KMEM
65993+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
65994+ default y if GRKERNSEC_CONFIG_AUTO
65995+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
65996+ help
65997+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to
65998+	  be written to or read from, closing off their use to modify or leak the
65999+	  contents of the running kernel. /dev/port will also not be allowed to be opened,
66000+	  writing to /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
66001+ If you have module support disabled, enabling this will close up several
66002+ ways that are currently used to insert malicious code into the running
66003+ kernel.
66004+
66005+ Even with this feature enabled, we still highly recommend that
66006+ you use the RBAC system, as it is still possible for an attacker to
66007+ modify the running kernel through other more obscure methods.
66008+
66009+ It is highly recommended that you say Y here if you meet all the
66010+ conditions above.
66011+
66012+config GRKERNSEC_VM86
66013+ bool "Restrict VM86 mode"
66014+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
66015+ depends on X86_32
66016+
66017+ help
66018+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
66019+ make use of a special execution mode on 32bit x86 processors called
66020+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
66021+ video cards and will still work with this option enabled. The purpose
66022+ of the option is to prevent exploitation of emulation errors in
66023+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
66024+ Nearly all users should be able to enable this option.
66025+
66026+config GRKERNSEC_IO
66027+ bool "Disable privileged I/O"
66028+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
66029+ depends on X86
66030+ select RTC_CLASS
66031+ select RTC_INTF_DEV
66032+ select RTC_DRV_CMOS
66033+
66034+ help
66035+ If you say Y here, all ioperm and iopl calls will return an error.
66036+ Ioperm and iopl can be used to modify the running kernel.
66037+ Unfortunately, some programs need this access to operate properly,
66038+	  the most notable of which are XFree86 and hwclock. The hwclock case can
66039+	  be remedied by having RTC support in the kernel, so real-time
66040+	  clock support is enabled whenever this option is enabled, to ensure
66041+ that hwclock operates correctly. If hwclock still does not work,
66042+ either update udev or symlink /dev/rtc to /dev/rtc0.
66043+
66044+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
66045+ you may not be able to boot into a graphical environment with this
66046+ option enabled. In this case, you should use the RBAC system instead.
66047+
66048+config GRKERNSEC_JIT_HARDEN
66049+ bool "Harden BPF JIT against spray attacks"
66050+ default y if GRKERNSEC_CONFIG_AUTO
66051+ depends on BPF_JIT && X86
66052+ help
66053+ If you say Y here, the native code generated by the kernel's Berkeley
66054+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
66055+ attacks that attempt to fit attacker-beneficial instructions in
66056+ 32bit immediate fields of JIT-generated native instructions. The
66057+ attacker will generally aim to cause an unintended instruction sequence
66058+ of JIT-generated native code to execute by jumping into the middle of
66059+ a generated instruction. This feature effectively randomizes the 32bit
66060+ immediate constants present in the generated code to thwart such attacks.
66061+
66062+ If you're using KERNEXEC, it's recommended that you enable this option
66063+ to supplement the hardening of the kernel.
66064+
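
   [The immediate randomization described above is a form of constant
   blinding. A minimal sketch of the idea follows; struct jit_ctx and the
   emit_* helpers are hypothetical stand-ins, not code from this patch.]

	#include <linux/random.h>
	#include <linux/types.h>

	struct jit_ctx;					/* hypothetical JIT state */
	void emit_mov_imm(struct jit_ctx *ctx, int reg, u32 imm);
	void emit_xor_imm(struct jit_ctx *ctx, int reg, u32 imm);

	/* Never emit an attacker-supplied immediate verbatim: emit (imm ^ key)
	 * and undo the XOR at runtime, so the generated native code never
	 * contains an attacker-controlled 32bit constant. */
	static void emit_blinded_mov(struct jit_ctx *ctx, int reg, u32 imm)
	{
		u32 key = prandom_u32();	/* fresh key for each constant */

		emit_mov_imm(ctx, reg, imm ^ key);	/* mov reg, imm ^ key */
		emit_xor_imm(ctx, reg, key);		/* xor reg, key -> imm */
	}
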
66065+config GRKERNSEC_PERF_HARDEN
66066+ bool "Disable unprivileged PERF_EVENTS usage by default"
66067+ default y if GRKERNSEC_CONFIG_AUTO
66068+ depends on PERF_EVENTS
66069+ help
66070+ If you say Y here, the range of acceptable values for the
66071+	  /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow a
66072+	  new value, 3, which also becomes the default. When the sysctl is set to this value, no
66073+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
66074+
66075+ Though PERF_EVENTS can be used legitimately for performance monitoring
66076+ and low-level application profiling, it is forced on regardless of
66077+ configuration, has been at fault for several vulnerabilities, and
66078+ creates new opportunities for side channels and other information leaks.
66079+
66080+ This feature puts PERF_EVENTS into a secure default state and permits
66081+ the administrator to change out of it temporarily if unprivileged
66082+ application profiling is needed.
66083+
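
   [The gate this buys is conceptually simple; a sketch follows. The
   threshold check is an assumption about the patch's exact form, though
   sysctl_perf_event_paranoid itself is a real kernel symbol.]

	#include <linux/capability.h>
	#include <linux/errno.h>
	#include <linux/perf_event.h>	/* declares sysctl_perf_event_paranoid */

	/* With the paranoid level raised to 3, unprivileged callers are
	 * refused before any perf_event_open() work is done. */
	static int perf_paranoid_check(void)
	{
		if (sysctl_perf_event_paranoid > 2 && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}
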
66084+config GRKERNSEC_RAND_THREADSTACK
66085+ bool "Insert random gaps between thread stacks"
66086+ default y if GRKERNSEC_CONFIG_AUTO
66087+ depends on PAX_RANDMMAP && !PPC
66088+ help
66089+ If you say Y here, a random-sized gap will be enforced between allocated
66090+ thread stacks. Glibc's NPTL and other threading libraries that
66091+ pass MAP_STACK to the kernel for thread stack allocation are supported.
66092+ The implementation currently provides 8 bits of entropy for the gap.
66093+
66094+ Many distributions do not compile threaded remote services with the
66095+ -fstack-check argument to GCC, causing the variable-sized stack-based
66096+ allocator, alloca(), to not probe the stack on allocation. This
66097+ permits an unbounded alloca() to skip over any guard page and potentially
66098+ modify another thread's stack reliably. An enforced random gap
66099+ reduces the reliability of such an attack and increases the chance
66100+ that such a read/write to another thread's stack instead lands in
66101+ an unmapped area, causing a crash and triggering grsecurity's
66102+ anti-bruteforcing logic.
66103+
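
   [As an illustration of the gap insertion described above (helper name
   and placement are assumptions, not the patch's own code), the mmap path
   can widen a MAP_STACK request by a random number of pages:]

	#include <linux/mman.h>
	#include <linux/mm.h>
	#include <linux/random.h>

	/* 8 bits of entropy: pad the address-space search for a MAP_STACK
	 * mapping so consecutive thread stacks are not adjacent. */
	static unsigned long thread_stack_gap(unsigned long flags)
	{
		if (!(flags & MAP_STACK))
			return 0;
		return ((prandom_u32() & 0xFFUL) + 1) << PAGE_SHIFT;
	}
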
66104+config GRKERNSEC_PROC_MEMMAP
66105+ bool "Harden ASLR against information leaks and entropy reduction"
66106+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
66107+ depends on PAX_NOEXEC || PAX_ASLR
66108+ help
66109+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
66110+	  give no information about the addresses of the task's mappings if
66111+	  PaX features that rely on random addresses are enabled on the task.
66112+	  In addition to sanitizing this information and disabling other
66113+	  dangerous sources of information, this option causes reads of sensitive
66114+	  /proc/<pid> entries to return no data when the file descriptor was opened
66115+	  in a different task than the one performing the read. Such attempts are logged.
66116+ This option also limits argv/env strings for suid/sgid binaries
66117+ to 512KB to prevent a complete exhaustion of the stack entropy provided
66118+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
66119+ binaries to prevent alternative mmap layouts from being abused.
66120+
66121+ If you use PaX it is essential that you say Y here as it closes up
66122+ several holes that make full ASLR useless locally.
66123+
66124+
66125+config GRKERNSEC_KSTACKOVERFLOW
66126+ bool "Prevent kernel stack overflows"
66127+ default y if GRKERNSEC_CONFIG_AUTO
66128+ depends on !IA64 && 64BIT
66129+ help
66130+ If you say Y here, the kernel's process stacks will be allocated
66131+ with vmalloc instead of the kernel's default allocator. This
66132+	  introduces guard pages that, in combination with the alloca checking
66133+	  of the STACKLEAK feature, prevent all forms of kernel process stack
66134+ overflow abuse. Note that this is different from kernel stack
66135+ buffer overflows.
66136+
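
   [A sketch of the allocation change; the helpers below are hypothetical,
   and the real patch wires this into each architecture's thread stack
   allocation. The point is that vmalloc'd areas are fenced by unmapped
   guard pages, so an overflow faults instead of silently corrupting a
   neighboring allocation.]

	#include <linux/vmalloc.h>
	#include <linux/thread_info.h>	/* THREAD_SIZE */

	static void *grsec_alloc_thread_stack(void)
	{
		/* the vmalloc area is followed by an unmapped guard page */
		return vmalloc(THREAD_SIZE);
	}

	static void grsec_free_thread_stack(void *stack)
	{
		vfree(stack);
	}
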
66137+config GRKERNSEC_BRUTE
66138+ bool "Deter exploit bruteforcing"
66139+ default y if GRKERNSEC_CONFIG_AUTO
66140+ help
66141+ If you say Y here, attempts to bruteforce exploits against forking
66142+ daemons such as apache or sshd, as well as against suid/sgid binaries
66143+ will be deterred. When a child of a forking daemon is killed by PaX
66144+ or crashes due to an illegal instruction or other suspicious signal,
66145+ the parent process will be delayed 30 seconds upon every subsequent
66146+ fork until the administrator is able to assess the situation and
66147+ restart the daemon.
66148+ In the suid/sgid case, the attempt is logged, the user has all their
66149+ existing instances of the suid/sgid binary terminated and will
66150+ be unable to execute any suid/sgid binaries for 15 minutes.
66151+
66152+ It is recommended that you also enable signal logging in the auditing
66153+ section so that logs are generated when a process triggers a suspicious
66154+ signal.
66155+ If the sysctl option is enabled, a sysctl option with name
66156+ "deter_bruteforce" is created.
66157+
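
   [The forking-daemon half of the deterrent reduces to a delay on fork;
   roughly as below, where the flag parameter stands in for per-parent
   state the real code keeps -- the names are illustrative only.]

	#include <linux/delay.h>
	#include <linux/types.h>

	/* 'child_crashed_before' stands for a flag the real code sets when
	 * a child of this parent dies from a suspicious signal. */
	static void brute_fork_delay(bool child_crashed_before)
	{
		if (child_crashed_before)
			msleep(30 * 1000);	/* 30 second delay per fork */
	}
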
66158+config GRKERNSEC_MODHARDEN
66159+ bool "Harden module auto-loading"
66160+ default y if GRKERNSEC_CONFIG_AUTO
66161+ depends on MODULES
66162+ help
66163+ If you say Y here, module auto-loading in response to use of some
66164+ feature implemented by an unloaded module will be restricted to
66165+ root users. Enabling this option helps defend against attacks
66166+ by unprivileged users who abuse the auto-loading behavior to
66167+ cause a vulnerable module to load that is then exploited.
66168+
66169+ If this option prevents a legitimate use of auto-loading for a
66170+ non-root user, the administrator can execute modprobe manually
66171+ with the exact name of the module mentioned in the alert log.
66172+ Alternatively, the administrator can add the module to the list
66173+ of modules loaded at boot by modifying init scripts.
66174+
66175+ Modification of init scripts will most likely be needed on
66176+ Ubuntu servers with encrypted home directory support enabled,
66177+ as the first non-root user logging in will cause the ecb(aes),
66178+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
66179+
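
   [Conceptually the restriction is a one-line gate in the implicit
   module-request path; a sketch follows, and the function below is not
   the patch's actual hook.]

	#include <linux/cred.h>
	#include <linux/errno.h>

	/* Implicit (auto-load) requests from unprivileged users are refused;
	 * an explicit modprobe by root is unaffected. */
	static int modharden_check_autoload(void)
	{
		if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
			return 0;
		return -EPERM;
	}
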
66180+config GRKERNSEC_HIDESYM
66181+ bool "Hide kernel symbols"
66182+ default y if GRKERNSEC_CONFIG_AUTO
66183+ select PAX_USERCOPY_SLABS
66184+ help
66185+	  If you say Y here, getting information on loaded modules and
66186+	  displaying all kernel symbols through a syscall will be restricted
66187+ to users with CAP_SYS_MODULE. For software compatibility reasons,
66188+ /proc/kallsyms will be restricted to the root user. The RBAC
66189+ system can hide that entry even from root.
66190+
66191+ This option also prevents leaking of kernel addresses through
66192+ several /proc entries.
66193+
66194+ Note that this option is only effective provided the following
66195+ conditions are met:
66196+ 1) The kernel using grsecurity is not precompiled by some distribution
66197+ 2) You have also enabled GRKERNSEC_DMESG
66198+ 3) You are using the RBAC system and hiding other files such as your
66199+ kernel image and System.map. Alternatively, enabling this option
66200+ causes the permissions on /boot, /lib/modules, and the kernel
66201+ source directory to change at compile time to prevent
66202+ reading by non-root users.
66203+ If the above conditions are met, this option will aid in providing a
66204+ useful protection against local kernel exploitation of overflows
66205+ and arbitrary read/write vulnerabilities.
66206+
66207+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
66208+ in addition to this feature.
66209+
66210+config GRKERNSEC_RANDSTRUCT
66211+ bool "Randomize layout of sensitive kernel structures"
66212+ default y if GRKERNSEC_CONFIG_AUTO
66213+ select GRKERNSEC_HIDESYM
66214+ select MODVERSIONS if MODULES
66215+ help
66216+ If you say Y here, the layouts of a number of sensitive kernel
66217+ structures (task, fs, cred, etc) and all structures composed entirely
66218+ of function pointers (aka "ops" structs) will be randomized at compile-time.
66219+ This can introduce the requirement of an additional infoleak
66220+ vulnerability for exploits targeting these structure types.
66221+
66222+ Enabling this feature will introduce some performance impact, slightly
66223+ increase memory usage, and prevent the use of forensic tools like
66224+ Volatility against the system (unless the kernel source tree, which
66225+ holds the randomization seed, is left uncleaned after installation).
66226+
66227+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
66228+ It remains after a make clean to allow for external modules to be compiled
66229+ with the existing seed and will be removed by a make mrproper or
66230+ make distclean.
66231+
66232+ Note that the implementation requires gcc 4.6.4 or newer. You may need
66233+ to install the supporting headers explicitly in addition to the normal
66234+ gcc package.
66235+
66236+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
66237+ bool "Use cacheline-aware structure randomization"
66238+ depends on GRKERNSEC_RANDSTRUCT
66239+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
66240+ help
66241+ If you say Y here, the RANDSTRUCT randomization will make a best effort
66242+ at restricting randomization to cacheline-sized groups of elements. It
66243+ will further not randomize bitfields in structures. This reduces the
66244+ performance hit of RANDSTRUCT at the cost of weakened randomization.
66245+
66246+config GRKERNSEC_KERN_LOCKOUT
66247+ bool "Active kernel exploit response"
66248+ default y if GRKERNSEC_CONFIG_AUTO
66249+ depends on X86 || ARM || PPC || SPARC
66250+ help
66251+ If you say Y here, when a PaX alert is triggered due to suspicious
66252+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
66253+ or an OOPS occurs due to bad memory accesses, instead of just
66254+ terminating the offending process (and potentially allowing
66255+ a subsequent exploit from the same user), we will take one of two
66256+ actions:
66257+ If the user was root, we will panic the system
66258+ If the user was non-root, we will log the attempt, terminate
66259+ all processes owned by the user, then prevent them from creating
66260+ any new processes until the system is restarted
66261+ This deters repeated kernel exploitation/bruteforcing attempts
66262+ and is useful for later forensics.
66263+
66264+config GRKERNSEC_OLD_ARM_USERLAND
66265+ bool "Old ARM userland compatibility"
66266+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
66267+ help
66268+ If you say Y here, stubs of executable code to perform such operations
66269+ as "compare-exchange" will be placed at fixed locations in the ARM vector
66270+ table. This is unfortunately needed for old ARM userland meant to run
66271+ across a wide range of processors. Without this option enabled,
66272+ the get_tls and data memory barrier stubs will be emulated by the kernel,
66273+ which is enough for Linaro userlands or other userlands designed for v6
66274+ and newer ARM CPUs. It's recommended that you try without this option enabled
66275+ first, and only enable it if your userland does not boot (it will likely fail
66276+ at init time).
66277+
66278+endmenu
66279+menu "Role Based Access Control Options"
66280+depends on GRKERNSEC
66281+
66282+config GRKERNSEC_RBAC_DEBUG
66283+ bool
66284+
66285+config GRKERNSEC_NO_RBAC
66286+ bool "Disable RBAC system"
66287+ help
66288+ If you say Y here, the /dev/grsec device will be removed from the kernel,
66289+ preventing the RBAC system from being enabled. You should only say Y
66290+ here if you have no intention of using the RBAC system, so as to prevent
66291+ an attacker with root access from misusing the RBAC system to hide files
66292+ and processes when loadable module support and /dev/[k]mem have been
66293+ locked down.
66294+
66295+config GRKERNSEC_ACL_HIDEKERN
66296+ bool "Hide kernel processes"
66297+ help
66298+ If you say Y here, all kernel threads will be hidden to all
66299+ processes but those whose subject has the "view hidden processes"
66300+ flag.
66301+
66302+config GRKERNSEC_ACL_MAXTRIES
66303+ int "Maximum tries before password lockout"
66304+ default 3
66305+ help
66306+ This option enforces the maximum number of times a user can attempt
66307+ to authorize themselves with the grsecurity RBAC system before being
66308+ denied the ability to attempt authorization again for a specified time.
66309+ The lower the number, the harder it will be to brute-force a password.
66310+
66311+config GRKERNSEC_ACL_TIMEOUT
66312+ int "Time to wait after max password tries, in seconds"
66313+ default 30
66314+ help
66315+ This option specifies the time the user must wait after attempting to
66316+ authorize to the RBAC system with the maximum number of invalid
66317+ passwords. The higher the number, the harder it will be to brute-force
66318+ a password.
66319+
66320+endmenu
66321+menu "Filesystem Protections"
66322+depends on GRKERNSEC
66323+
66324+config GRKERNSEC_PROC
66325+ bool "Proc restrictions"
66326+ default y if GRKERNSEC_CONFIG_AUTO
66327+ help
66328+ If you say Y here, the permissions of the /proc filesystem
66329+ will be altered to enhance system security and privacy. You MUST
66330+ choose either a user only restriction or a user and group restriction.
66331+ Depending upon the option you choose, you can either restrict users to
66332+ see only the processes they themselves run (the "restrict to user only"
66333+ option), or designate a group that can view all processes and files
66334+ normally restricted to root. NOTE: If you're running identd or
66335+ ntpd as a non-root user, you will have to run it as the group you
66336+ specify here.
66337+
66338+config GRKERNSEC_PROC_USER
66339+ bool "Restrict /proc to user only"
66340+ depends on GRKERNSEC_PROC
66341+ help
66342+ If you say Y here, non-root users will only be able to view their own
66343+ processes, and will be restricted from viewing network-related
66344+ information as well as kernel symbol and module information.
66345+
66346+config GRKERNSEC_PROC_USERGROUP
66347+ bool "Allow special group"
66348+ default y if GRKERNSEC_CONFIG_AUTO
66349+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
66350+ help
66351+ If you say Y here, you will be able to select a group that will be
66352+ able to view all processes and network-related information. If you've
66353+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
66354+ remain hidden. This option is useful if you want to run identd as
66355+ a non-root user. The group you select may also be chosen at boot time
66356+ via "grsec_proc_gid=" on the kernel commandline.
66357+
66358+config GRKERNSEC_PROC_GID
66359+ int "GID for special group"
66360+ depends on GRKERNSEC_PROC_USERGROUP
66361+ default 1001
66362+
66363+config GRKERNSEC_PROC_ADD
66364+ bool "Additional restrictions"
66365+ default y if GRKERNSEC_CONFIG_AUTO
66366+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
66367+ help
66368+ If you say Y here, additional restrictions will be placed on
66369+ /proc that keep normal users from viewing device information and
66370+ slabinfo information that could be useful for exploits.
66371+
66372+config GRKERNSEC_LINK
66373+ bool "Linking restrictions"
66374+ default y if GRKERNSEC_CONFIG_AUTO
66375+ help
66376+ If you say Y here, /tmp race exploits will be prevented, since users
66377+ will no longer be able to follow symlinks owned by other users in
66378+ world-writable +t directories (e.g. /tmp), unless the owner of the
66379+ symlink is the owner of the directory. Users will also not be
66380+ able to hardlink to files they do not own. If the sysctl option is
66381+ enabled, a sysctl option with name "linking_restrictions" is created.
66382+
66383+config GRKERNSEC_SYMLINKOWN
66384+ bool "Kernel-enforced SymlinksIfOwnerMatch"
66385+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
66386+ help
66387+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
66388+ that prevents it from being used as a security feature. As Apache
66389+ verifies the symlink by performing a stat() against the target of
66390+ the symlink before it is followed, an attacker can set up a symlink
66391+ to point to a same-owned file, then replace the symlink with one
66392+ that targets another user's file just after Apache "validates" the
66393+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
66394+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
66395+ will be in place for the group you specify. If the sysctl option
66396+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
66397+ created.
66398+
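The race above is a classic stat()-versus-open() TOCTOU. A minimal userspace sketch of the safer pattern follows (validating ownership via fstat() on the descriptor actually opened; the helper name and the fixed-owner policy are illustrative assumptions, and only the in-kernel check this option provides is fully race-free):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Check ownership with fstat() on the opened fd, so the check and the
     * use refer to the same inode. A stat() of the pathname beforehand can
     * describe a different file than the one a later open() follows. */
    int open_if_owned_by(const char *path, uid_t owner)
    {
        struct stat st;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return -1;
        if (fstat(fd, &st) != 0 || st.st_uid != owner) {
            close(fd);
            return -1;
        }
        return fd;
    }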
66399+config GRKERNSEC_SYMLINKOWN_GID
66400+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
66401+ depends on GRKERNSEC_SYMLINKOWN
66402+ default 1006
66403+ help
66404+ Setting this GID determines what group kernel-enforced
66405+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
66406+ is enabled, a sysctl option with name "symlinkown_gid" is created.
66407+
66408+config GRKERNSEC_FIFO
66409+ bool "FIFO restrictions"
66410+ default y if GRKERNSEC_CONFIG_AUTO
66411+ help
66412+ If you say Y here, users will not be able to write to FIFOs they don't
66413+ own in world-writable +t directories (e.g. /tmp), unless the owner of
66414+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
66415+ option is enabled, a sysctl option with name "fifo_restrictions" is
66416+ created.
66417+
66418+config GRKERNSEC_SYSFS_RESTRICT
66419+ bool "Sysfs/debugfs restriction"
66420+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
66421+ depends on SYSFS
66422+ help
66423+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
66424+ any filesystem normally mounted under it (e.g. debugfs) will be
66425+ mostly accessible only by root. These filesystems generally provide access
66426+ to hardware and debug information that isn't appropriate for unprivileged
66427+ users of the system. Sysfs and debugfs have also become a large source
66428+ of new vulnerabilities, ranging from infoleaks to local compromise.
66429+ Very little security-minded oversight has been involved
66430+ in adding new exporters of information to these filesystems, so their
66431+ use is discouraged.
66432+ For reasons of compatibility, a few directories have been whitelisted
66433+ for access by non-root users:
66434+ /sys/fs/selinux
66435+ /sys/fs/fuse
66436+ /sys/devices/system/cpu
66437+
66438+config GRKERNSEC_ROFS
66439+ bool "Runtime read-only mount protection"
66440+ depends on SYSCTL
66441+ help
66442+ If you say Y here, a sysctl option with name "romount_protect" will
66443+ be created. By setting this option to 1 at runtime, filesystems
66444+ will be protected in the following ways:
66445+ * No new writable mounts will be allowed
66446+ * Existing read-only mounts won't be able to be remounted read/write
66447+ * Write operations will be denied on all block devices
66448+ This option acts independently of grsec_lock: once it is set to 1,
66449+ it cannot be turned off. Therefore, please be mindful of the resulting
66450+ behavior if this option is enabled in an init script on a read-only
66451+ filesystem.
66452+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
66453+ and GRKERNSEC_IO should be enabled and module loading disabled via
66454+ config or at runtime.
66455+ This feature is mainly intended for secure embedded systems.
66456+
66457+
66458+config GRKERNSEC_DEVICE_SIDECHANNEL
66459+ bool "Eliminate stat/notify-based device sidechannels"
66460+ default y if GRKERNSEC_CONFIG_AUTO
66461+ help
66462+ If you say Y here, timing analyses on block or character
66463+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
66464+ will be thwarted for unprivileged users. If a process without
66465+ CAP_MKNOD stats such a device, the last access and last modify times
66466+ will match the device's create time. No access or modify events
66467+ will be triggered through inotify/dnotify/fanotify for such devices.
66468+ This feature will prevent attacks that may at a minimum
66469+ allow an attacker to determine the administrator's password length.
66470+
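For illustration, a minimal sketch of the kind of unprivileged probe this option defeats (with the option enabled, the reported times simply equal the device's create time):

    #include <stdio.h>
    #include <sys/stat.h>

    /* Polling atime/mtime on a shared character device such as /dev/ptmx
     * can reveal when a tty is in use, e.g. while a password is typed. */
    int main(void)
    {
        struct stat st;

        if (stat("/dev/ptmx", &st) != 0)
            return 1;
        printf("atime=%ld mtime=%ld\n",
               (long)st.st_atime, (long)st.st_mtime);
        return 0;
    }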
66471+config GRKERNSEC_CHROOT
66472+ bool "Chroot jail restrictions"
66473+ default y if GRKERNSEC_CONFIG_AUTO
66474+ help
66475+ If you say Y here, you will be able to choose several options that will
66476+ make breaking out of a chrooted jail much more difficult. If you
66477+ encounter no software incompatibilities with the following options, it
66478+ is recommended that you enable each one.
66479+
66480+ Note that the chroot restrictions are not intended to apply to "chroots"
66481+ to directories that are simple bind mounts of the global root filesystem.
66482+ For several other reasons, a user shouldn't expect any significant
66483+ security by performing such a chroot.
66484+
66485+config GRKERNSEC_CHROOT_MOUNT
66486+ bool "Deny mounts"
66487+ default y if GRKERNSEC_CONFIG_AUTO
66488+ depends on GRKERNSEC_CHROOT
66489+ help
66490+ If you say Y here, processes inside a chroot will not be able to
66491+ mount or remount filesystems. If the sysctl option is enabled, a
66492+ sysctl option with name "chroot_deny_mount" is created.
66493+
66494+config GRKERNSEC_CHROOT_DOUBLE
66495+ bool "Deny double-chroots"
66496+ default y if GRKERNSEC_CONFIG_AUTO
66497+ depends on GRKERNSEC_CHROOT
66498+ help
66499+ If you say Y here, processes inside a chroot will not be able to chroot
66500+ again outside the chroot. This is a widely used method of breaking
66501+ out of a chroot jail and should not be allowed. If the sysctl
66502+ option is enabled, a sysctl option with name
66503+ "chroot_deny_chroot" is created.
66504+
66505+config GRKERNSEC_CHROOT_PIVOT
66506+ bool "Deny pivot_root in chroot"
66507+ default y if GRKERNSEC_CONFIG_AUTO
66508+ depends on GRKERNSEC_CHROOT
66509+ help
66510+ If you say Y here, processes inside a chroot will not be able to use
66511+ a function called pivot_root() that was introduced in Linux 2.3.41. It
66512+ works similarly to chroot in that it changes the root filesystem. This
66513+ function could be misused in a chrooted process to attempt to break out
66514+ of the chroot, and therefore should not be allowed. If the sysctl
66515+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
66516+ created.
66517+
66518+config GRKERNSEC_CHROOT_CHDIR
66519+ bool "Enforce chdir(\"/\") on all chroots"
66520+ default y if GRKERNSEC_CONFIG_AUTO
66521+ depends on GRKERNSEC_CHROOT
66522+ help
66523+ If you say Y here, the current working directory of all newly-chrooted
66524+ applications will be set to the root directory of the chroot.
66525+ The man page on chroot(2) states:
66526+ Note that this call does not change the current working
66527+ directory, so that `.' can be outside the tree rooted at
66528+ `/'. In particular, the super-user can escape from a
66529+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
66530+
66531+ It is recommended that you say Y here, since it's not known to break
66532+ any software. If the sysctl option is enabled, a sysctl option with
66533+ name "chroot_enforce_chdir" is created.
66534+
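A minimal sketch of the userspace idiom this option enforces kernel-side (the helper name is an illustrative assumption):

    #include <unistd.h>

    /* Without the chdir("/"), the working directory can remain outside
     * the new root, enabling the "cd .." escape quoted from chroot(2)
     * above; this option makes the kernel perform the equivalent chdir. */
    int enter_jail(const char *root)
    {
        if (chroot(root) != 0)
            return -1;
        return chdir("/");
    }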
66535+config GRKERNSEC_CHROOT_CHMOD
66536+ bool "Deny (f)chmod +s"
66537+ default y if GRKERNSEC_CONFIG_AUTO
66538+ depends on GRKERNSEC_CHROOT
66539+ help
66540+ If you say Y here, processes inside a chroot will not be able to chmod
66541+ or fchmod files to make them have suid or sgid bits. This protects
66542+ against another published method of breaking a chroot. If the sysctl
66543+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
66544+ created.
66545+
66546+config GRKERNSEC_CHROOT_FCHDIR
66547+ bool "Deny fchdir out of chroot"
66548+ default y if GRKERNSEC_CONFIG_AUTO
66549+ depends on GRKERNSEC_CHROOT
66550+ help
66551+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
66552+ to a file descriptor of the chrooting process that points to a directory
66553+ outside the filesystem will be stopped. If the sysctl option
66554+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
66555+
66556+config GRKERNSEC_CHROOT_MKNOD
66557+ bool "Deny mknod"
66558+ default y if GRKERNSEC_CONFIG_AUTO
66559+ depends on GRKERNSEC_CHROOT
66560+ help
66561+ If you say Y here, processes inside a chroot will not be allowed to
66562+ mknod. The problem with using mknod inside a chroot is that it
66563+ would allow an attacker to create a device entry that is the same
66564+ as one on the physical root of your system, which could be anything
66565+ from the console device to a device for your hard drive (which
66566+ they could then use to wipe the drive or steal data). It is recommended
66567+ that you say Y here, unless you run into software incompatibilities.
66568+ If the sysctl option is enabled, a sysctl option with name
66569+ "chroot_deny_mknod" is created.
66570+
66571+config GRKERNSEC_CHROOT_SHMAT
66572+ bool "Deny shmat() out of chroot"
66573+ default y if GRKERNSEC_CONFIG_AUTO
66574+ depends on GRKERNSEC_CHROOT
66575+ help
66576+ If you say Y here, processes inside a chroot will not be able to attach
66577+ to shared memory segments that were created outside of the chroot jail.
66578+ It is recommended that you say Y here. If the sysctl option is enabled,
66579+ a sysctl option with name "chroot_deny_shmat" is created.
66580+
66581+config GRKERNSEC_CHROOT_UNIX
66582+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
66583+ default y if GRKERNSEC_CONFIG_AUTO
66584+ depends on GRKERNSEC_CHROOT
66585+ help
66586+ If you say Y here, processes inside a chroot will not be able to
66587+ connect to abstract (meaning not belonging to a filesystem) Unix
66588+ domain sockets that were bound outside of a chroot. It is recommended
66589+ that you say Y here. If the sysctl option is enabled, a sysctl option
66590+ with name "chroot_deny_unix" is created.
66591+
66592+config GRKERNSEC_CHROOT_FINDTASK
66593+ bool "Protect outside processes"
66594+ default y if GRKERNSEC_CONFIG_AUTO
66595+ depends on GRKERNSEC_CHROOT
66596+ help
66597+ If you say Y here, processes inside a chroot will not be able to
66598+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
66599+ getsid, or view any process outside of the chroot. If the sysctl
66600+ option is enabled, a sysctl option with name "chroot_findtask" is
66601+ created.
66602+
66603+config GRKERNSEC_CHROOT_NICE
66604+ bool "Restrict priority changes"
66605+ default y if GRKERNSEC_CONFIG_AUTO
66606+ depends on GRKERNSEC_CHROOT
66607+ help
66608+ If you say Y here, processes inside a chroot will not be able to raise
66609+ the priority of processes in the chroot, or alter the priority of
66610+ processes outside the chroot. This provides more security than simply
66611+ removing CAP_SYS_NICE from the process' capability set. If the
66612+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
66613+ is created.
66614+
66615+config GRKERNSEC_CHROOT_SYSCTL
66616+ bool "Deny sysctl writes"
66617+ default y if GRKERNSEC_CONFIG_AUTO
66618+ depends on GRKERNSEC_CHROOT
66619+ help
66620+ If you say Y here, an attacker in a chroot will not be able to
66621+ write to sysctl entries, either by sysctl(2) or through a /proc
66622+ interface. It is strongly recommended that you say Y here. If the
66623+ sysctl option is enabled, a sysctl option with name
66624+ "chroot_deny_sysctl" is created.
66625+
66626+config GRKERNSEC_CHROOT_CAPS
66627+ bool "Capability restrictions"
66628+ default y if GRKERNSEC_CONFIG_AUTO
66629+ depends on GRKERNSEC_CHROOT
66630+ help
66631+ If you say Y here, the capabilities on all processes within a
66632+ chroot jail will be lowered to stop module insertion, raw i/o,
66633+ system and net admin tasks, rebooting the system, modifying immutable
66634+ files, modifying IPC owned by another, and changing the system time.
66635+ This is left an option because it can break some apps. Disable this
66636+ if your chrooted apps are having problems performing those kinds of
66637+ tasks. If the sysctl option is enabled, a sysctl option with
66638+ name "chroot_caps" is created.
66639+
66640+config GRKERNSEC_CHROOT_INITRD
66641+ bool "Exempt initrd tasks from restrictions"
66642+ default y if GRKERNSEC_CONFIG_AUTO
66643+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
66644+ help
66645+ If you say Y here, tasks started prior to init will be exempted from
66646+ grsecurity's chroot restrictions. This option is mainly meant to
66647+ resolve Plymouth's performing privileged operations unnecessarily
66648+ in a chroot.
66649+
66650+endmenu
66651+menu "Kernel Auditing"
66652+depends on GRKERNSEC
66653+
66654+config GRKERNSEC_AUDIT_GROUP
66655+ bool "Single group for auditing"
66656+ help
66657+ If you say Y here, the exec and chdir logging features will only operate
66658+ on a group you specify. This option is recommended if you only want to
66659+ watch certain users instead of having a large amount of logs from the
66660+ entire system. If the sysctl option is enabled, a sysctl option with
66661+ name "audit_group" is created.
66662+
66663+config GRKERNSEC_AUDIT_GID
66664+ int "GID for auditing"
66665+ depends on GRKERNSEC_AUDIT_GROUP
66666+ default 1007
66667+
66668+config GRKERNSEC_EXECLOG
66669+ bool "Exec logging"
66670+ help
66671+ If you say Y here, all execve() calls will be logged (since the
66672+ other exec*() calls are frontends to execve(), all execution
66673+ will be logged). Useful for shell-servers that like to keep track
66674+ of their users. If the sysctl option is enabled, a sysctl option with
66675+ name "exec_logging" is created.
66676+ WARNING: When enabled, this option will produce a LOT of logs, especially
66677+ on an active system.
66678+
66679+config GRKERNSEC_RESLOG
66680+ bool "Resource logging"
66681+ default y if GRKERNSEC_CONFIG_AUTO
66682+ help
66683+ If you say Y here, all attempts to overstep resource limits will
66684+ be logged with the resource name, the requested size, and the current
66685+ limit. It is highly recommended that you say Y here. If the sysctl
66686+ option is enabled, a sysctl option with name "resource_logging" is
66687+ created. If the RBAC system is enabled, the sysctl value is ignored.
66688+
66689+config GRKERNSEC_CHROOT_EXECLOG
66690+ bool "Log execs within chroot"
66691+ help
66692+ If you say Y here, all executions inside a chroot jail will be logged
66693+ to syslog. This can cause a large amount of logs if certain
66694+ applications (eg. djb's daemontools) are installed on the system, and
66695+ is therefore left as an option. If the sysctl option is enabled, a
66696+ sysctl option with name "chroot_execlog" is created.
66697+
66698+config GRKERNSEC_AUDIT_PTRACE
66699+ bool "Ptrace logging"
66700+ help
66701+ If you say Y here, all attempts to attach to a process via ptrace
66702+ will be logged. If the sysctl option is enabled, a sysctl option
66703+ with name "audit_ptrace" is created.
66704+
66705+config GRKERNSEC_AUDIT_CHDIR
66706+ bool "Chdir logging"
66707+ help
66708+ If you say Y here, all chdir() calls will be logged. If the sysctl
66709+ option is enabled, a sysctl option with name "audit_chdir" is created.
66710+
66711+config GRKERNSEC_AUDIT_MOUNT
66712+ bool "(Un)Mount logging"
66713+ help
66714+ If you say Y here, all mounts and unmounts will be logged. If the
66715+ sysctl option is enabled, a sysctl option with name "audit_mount" is
66716+ created.
66717+
66718+config GRKERNSEC_SIGNAL
66719+ bool "Signal logging"
66720+ default y if GRKERNSEC_CONFIG_AUTO
66721+ help
66722+ If you say Y here, certain important signals will be logged, such as
66723+ SIGSEGV, thereby informing you when an error occurred in a program,
66724+ which in some cases could indicate an exploit attempt.
66725+ If the sysctl option is enabled, a sysctl option with name
66726+ "signal_logging" is created.
66727+
66728+config GRKERNSEC_FORKFAIL
66729+ bool "Fork failure logging"
66730+ help
66731+ If you say Y here, all failed fork() attempts will be logged.
66732+ This could suggest a fork bomb, or someone attempting to overstep
66733+ their process limit. If the sysctl option is enabled, a sysctl option
66734+ with name "forkfail_logging" is created.
66735+
66736+config GRKERNSEC_TIME
66737+ bool "Time change logging"
66738+ default y if GRKERNSEC_CONFIG_AUTO
66739+ help
66740+ If you say Y here, any changes of the system clock will be logged.
66741+ If the sysctl option is enabled, a sysctl option with name
66742+ "timechange_logging" is created.
66743+
66744+config GRKERNSEC_PROC_IPADDR
66745+ bool "/proc/<pid>/ipaddr support"
66746+ default y if GRKERNSEC_CONFIG_AUTO
66747+ help
66748+ If you say Y here, a new entry will be added to each /proc/<pid>
66749+ directory that contains the IP address of the person using the task.
66750+ The IP is carried across local TCP and AF_UNIX stream sockets.
66751+ This information can be useful for IDS/IPSes to perform remote response
66752+ to a local attack. The entry is readable by only the owner of the
66753+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
66754+ the RBAC system), and thus does not create privacy concerns.
66755+
66756+config GRKERNSEC_RWXMAP_LOG
66757+ bool 'Denied RWX mmap/mprotect logging'
66758+ default y if GRKERNSEC_CONFIG_AUTO
66759+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
66760+ help
66761+ If you say Y here, calls to mmap() and mprotect() with explicit
66762+ usage of PROT_WRITE and PROT_EXEC together will be logged when
66763+ denied by the PAX_MPROTECT feature. This feature will also
66764+ log other problematic scenarios that can occur when PAX_MPROTECT
66765+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
66766+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
66767+ is created.
66768+
66769+endmenu
66770+
66771+menu "Executable Protections"
66772+depends on GRKERNSEC
66773+
66774+config GRKERNSEC_DMESG
66775+ bool "Dmesg(8) restriction"
66776+ default y if GRKERNSEC_CONFIG_AUTO
66777+ help
66778+ If you say Y here, non-root users will not be able to use dmesg(8)
66779+ to view the contents of the kernel's circular log buffer.
66780+ The kernel's log buffer often contains kernel addresses and other
66781+ identifying information useful to an attacker in fingerprinting a
66782+ system for a targeted exploit.
66783+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
66784+ created.
66785+
66786+config GRKERNSEC_HARDEN_PTRACE
66787+ bool "Deter ptrace-based process snooping"
66788+ default y if GRKERNSEC_CONFIG_AUTO
66789+ help
66790+ If you say Y here, TTY sniffers and other malicious monitoring
66791+ programs implemented through ptrace will be defeated. If you
66792+ have been using the RBAC system, this option has already been
66793+ enabled for several years for all users, with the ability to make
66794+ fine-grained exceptions.
66795+
66796+ This option only affects the ability of non-root users to ptrace
66797+ processes that are not a descendant of the ptracing process.
66798+ This means that strace ./binary and gdb ./binary will still work,
66799+ but attaching to arbitrary processes will not. If the sysctl
66800+ option is enabled, a sysctl option with name "harden_ptrace" is
66801+ created.
66802+
66803+config GRKERNSEC_PTRACE_READEXEC
66804+ bool "Require read access to ptrace sensitive binaries"
66805+ default y if GRKERNSEC_CONFIG_AUTO
66806+ help
66807+ If you say Y here, unprivileged users will not be able to ptrace unreadable
66808+ binaries. This option is useful in environments that
66809+ remove the read bits (e.g. file mode 4711) from suid binaries to
66810+ prevent infoleaking of their contents. This option adds
66811+ consistency to the use of that file mode: without it, the contents of
66812+ an unreadable suid binary could still be read out by ptracing it.
66813+
66814+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
66815+ is created.
66816+
66817+config GRKERNSEC_SETXID
66818+ bool "Enforce consistent multithreaded privileges"
66819+ default y if GRKERNSEC_CONFIG_AUTO
66820+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
66821+ help
66822+ If you say Y here, a change from a root uid to a non-root uid
66823+ in a multithreaded application will cause the resulting uids,
66824+ gids, supplementary groups, and capabilities in that thread
66825+ to be propagated to the other threads of the process. In most
66826+ cases this is unnecessary, as glibc will emulate this behavior
66827+ on behalf of the application. Other libcs do not act in the
66828+ same way, allowing the other threads of the process to continue
66829+ running with root privileges. If the sysctl option is enabled,
66830+ a sysctl option with name "consistent_setxid" is created.
66831+
66832+config GRKERNSEC_HARDEN_IPC
66833+ bool "Disallow access to overly-permissive IPC objects"
66834+ default y if GRKERNSEC_CONFIG_AUTO
66835+ depends on SYSVIPC
66836+ help
66837+ If you say Y here, access to overly-permissive IPC objects (shared
66838+ memory, message queues, and semaphores) will be denied for processes
66839+ meeting the following criteria, beyond normal permission checks:
66840+ 1) If the IPC object is world-accessible and the euid doesn't match
66841+ that of the creator or current uid for the IPC object
66842+ 2) If the IPC object is group-accessible and the egid doesn't
66843+ match that of the creator or current gid for the IPC object
66844+ It's a common error to grant too much permission to these objects,
66845+ with impact ranging from denial of service and information leaking to
66846+ privilege escalation. This feature was developed in response to
66847+ research by Tim Brown:
66848+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
66849+ who found hundreds of such insecure usages. Processes with
66850+ CAP_IPC_OWNER are still permitted to access these IPC objects.
66851+ If the sysctl option is enabled, a sysctl option with name
66852+ "harden_ipc" is created.
66853+
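A simplified sketch of the two criteria above; plain uid_t/gid_t and a stripped-down permission struct stand in for the kernel's kuid_t/kgid_t and struct kern_ipc_perm, so this illustrates the rule rather than the patch's actual check:

    #include <sys/types.h>

    struct ipc_perm_lite {
        uid_t cuid, uid;        /* creator and current owner uid */
        gid_t cgid, gid;        /* creator and current owner gid */
        unsigned short mode;    /* rwxrwxrwx permission bits */
    };

    static int ipc_access_denied(const struct ipc_perm_lite *p,
                                 uid_t euid, gid_t egid)
    {
        /* 1) world-accessible, euid matches neither creator nor owner */
        if ((p->mode & 0007) && euid != p->cuid && euid != p->uid)
            return 1;
        /* 2) group-accessible, egid matches neither creator nor owner */
        if ((p->mode & 0070) && egid != p->cgid && egid != p->gid)
            return 1;
        return 0;
    }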
66854+config GRKERNSEC_TPE
66855+ bool "Trusted Path Execution (TPE)"
66856+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
66857+ help
66858+ If you say Y here, you will be able to choose a gid to add to the
66859+ supplementary groups of users you want to mark as "untrusted."
66860+ These users will not be able to execute any files that are not in
66861+ root-owned directories writable only by root. If the sysctl option
66862+ is enabled, a sysctl option with name "tpe" is created.
66863+
66864+config GRKERNSEC_TPE_ALL
66865+ bool "Partially restrict all non-root users"
66866+ depends on GRKERNSEC_TPE
66867+ help
66868+ If you say Y here, all non-root users will be covered under
66869+ a weaker TPE restriction. This is separate from, and in addition to,
66870+ the main TPE options that you have selected elsewhere. Thus, if a
66871+ "trusted" GID is chosen, this restriction applies to even that GID.
66872+ Under this restriction, all non-root users will only be allowed to
66873+ execute files in directories they own that are not group or
66874+ world-writable, or in directories owned by root and writable only by
66875+ root. If the sysctl option is enabled, a sysctl option with name
66876+ "tpe_restrict_all" is created.
66877+
66878+config GRKERNSEC_TPE_INVERT
66879+ bool "Invert GID option"
66880+ depends on GRKERNSEC_TPE
66881+ help
66882+ If you say Y here, the group you specify in the TPE configuration will
66883+ decide what group TPE restrictions will be *disabled* for. This
66884+ option is useful if you want TPE restrictions to be applied to most
66885+ users on the system. If the sysctl option is enabled, a sysctl option
66886+ with name "tpe_invert" is created. Unlike other sysctl options, this
66887+ entry will default to on for backward compatibility.
66888+
66889+config GRKERNSEC_TPE_GID
66890+ int
66891+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
66892+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
66893+
66894+config GRKERNSEC_TPE_UNTRUSTED_GID
66895+ int "GID for TPE-untrusted users"
66896+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
66897+ default 1005
66898+ help
66899+ Setting this GID determines what group TPE restrictions will be
66900+ *enabled* for. If the sysctl option is enabled, a sysctl option
66901+ with name "tpe_gid" is created.
66902+
66903+config GRKERNSEC_TPE_TRUSTED_GID
66904+ int "GID for TPE-trusted users"
66905+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
66906+ default 1005
66907+ help
66908+ Setting this GID determines what group TPE restrictions will be
66909+ *disabled* for. If the sysctl option is enabled, a sysctl option
66910+ with name "tpe_gid" is created.
66911+
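To make the base TPE rule concrete, a rough userspace approximation (the real check runs in the kernel at exec time; a stat()-based version like this is inherently racy and is shown only for illustration):

    #include <sys/stat.h>

    /* A directory is "trusted" for TPE purposes if it is owned by root
     * and writable by no one but root. */
    static int tpe_dir_trusted(const char *dir)
    {
        struct stat st;

        if (stat(dir, &st) != 0)
            return 0;
        return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
    }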
66912+endmenu
66913+menu "Network Protections"
66914+depends on GRKERNSEC
66915+
66916+config GRKERNSEC_RANDNET
66917+ bool "Larger entropy pools"
66918+ default y if GRKERNSEC_CONFIG_AUTO
66919+ help
66920+ If you say Y here, the entropy pools used for many features of Linux
66921+ and grsecurity will be doubled in size. Since several grsecurity
66922+ features use additional randomness, it is recommended that you say Y
66923+ here. Saying Y here has a similar effect as modifying
66924+ /proc/sys/kernel/random/poolsize.
66925+
66926+config GRKERNSEC_BLACKHOLE
66927+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
66928+ default y if GRKERNSEC_CONFIG_AUTO
66929+ depends on NET
66930+ help
66931+ If you say Y here, neither TCP resets nor ICMP
66932+ destination-unreachable packets will be sent in response to packets
66933+ sent to ports for which no associated listening process exists.
66934+ This feature supports both IPv4 and IPv6 and exempts the
66935+ loopback interface from blackholing. Enabling this feature
66936+ makes a host more resilient to DoS attacks and reduces network
66937+ visibility against scanners.
66938+
66939+ The blackhole feature as-implemented is equivalent to the FreeBSD
66940+ blackhole feature, as it prevents RST responses to all packets, not
66941+ just SYNs. Under most application behavior this causes no
66942+ problems, but applications (like haproxy) may not close certain
66943+ connections in a way that cleanly terminates them on the remote
66944+ end, leaving the remote host in LAST_ACK state. Because of this
66945+ side-effect and to prevent intentional LAST_ACK DoSes, this
66946+ feature also adds automatic mitigation against such attacks.
66947+ The mitigation drastically reduces the amount of time a socket
66948+ can spend in LAST_ACK state. If you're using haproxy and not
66949+ all servers it connects to have this option enabled, consider
66950+ disabling this feature on the haproxy host.
66951+
66952+ If the sysctl option is enabled, two sysctl options with names
66953+ "ip_blackhole" and "lastack_retries" will be created.
66954+ While "ip_blackhole" takes the standard zero/non-zero on/off
66955+ toggle, "lastack_retries" uses the same kinds of values as
66956+ "tcp_retries1" and "tcp_retries2". The default value of 4
66957+ prevents a socket from lasting more than 45 seconds in LAST_ACK
66958+ state.
66959+
66960+config GRKERNSEC_NO_SIMULT_CONNECT
66961+ bool "Disable TCP Simultaneous Connect"
66962+ default y if GRKERNSEC_CONFIG_AUTO
66963+ depends on NET
66964+ help
66965+ If you say Y here, a feature by Willy Tarreau will be enabled that
66966+ removes a weakness in Linux's strict implementation of TCP that
66967+ allows two clients to connect to each other without either entering
66968+ a listening state. The weakness allows an attacker to easily prevent
66969+ a client from connecting to a known server provided the source port
66970+ for the connection is guessed correctly.
66971+
66972+ As the weakness could be used to prevent an antivirus or IPS from
66973+ fetching updates, or prevent an SSL gateway from fetching a CRL,
66974+ it should be eliminated by enabling this option. Though Linux is
66975+ one of few operating systems supporting simultaneous connect, it
66976+ has no legitimate use in practice and is rarely supported by firewalls.
66977+
66978+config GRKERNSEC_SOCKET
66979+ bool "Socket restrictions"
66980+ depends on NET
66981+ help
66982+ If you say Y here, you will be able to choose from several options.
66983+ If you assign a GID on your system and add it to the supplementary
66984+ groups of users you want to restrict socket access to, this patch
66985+ will perform up to three things, based on the option(s) you choose.
66986+
66987+config GRKERNSEC_SOCKET_ALL
66988+ bool "Deny any sockets to group"
66989+ depends on GRKERNSEC_SOCKET
66990+ help
66991+ If you say Y here, you will be able to choose a GID whose users will
66992+ be unable to connect to other hosts from your machine or run server
66993+ applications from your machine. If the sysctl option is enabled, a
66994+ sysctl option with name "socket_all" is created.
66995+
66996+config GRKERNSEC_SOCKET_ALL_GID
66997+ int "GID to deny all sockets for"
66998+ depends on GRKERNSEC_SOCKET_ALL
66999+ default 1004
67000+ help
67001+ Here you can choose the GID to disable socket access for. Remember to
67002+ add the users you want socket access disabled for to the GID
67003+ specified here. If the sysctl option is enabled, a sysctl option
67004+ with name "socket_all_gid" is created.
67005+
67006+config GRKERNSEC_SOCKET_CLIENT
67007+ bool "Deny client sockets to group"
67008+ depends on GRKERNSEC_SOCKET
67009+ help
67010+ If you say Y here, you will be able to choose a GID whose users will
67011+ be unable to connect to other hosts from your machine, but will be
67012+ able to run servers. If this option is enabled, all users in the group
67013+ you specify will have to use passive mode when initiating ftp transfers
67014+ from the shell on your machine. If the sysctl option is enabled, a
67015+ sysctl option with name "socket_client" is created.
67016+
67017+config GRKERNSEC_SOCKET_CLIENT_GID
67018+ int "GID to deny client sockets for"
67019+ depends on GRKERNSEC_SOCKET_CLIENT
67020+ default 1003
67021+ help
67022+ Here you can choose the GID to disable client socket access for.
67023+ Remember to add the users you want client socket access disabled for to
67024+ the GID specified here. If the sysctl option is enabled, a sysctl
67025+ option with name "socket_client_gid" is created.
67026+
67027+config GRKERNSEC_SOCKET_SERVER
67028+ bool "Deny server sockets to group"
67029+ depends on GRKERNSEC_SOCKET
67030+ help
67031+ If you say Y here, you will be able to choose a GID whose users will
67032+ be unable to run server applications from your machine. If the sysctl
67033+ option is enabled, a sysctl option with name "socket_server" is created.
67034+
67035+config GRKERNSEC_SOCKET_SERVER_GID
67036+ int "GID to deny server sockets for"
67037+ depends on GRKERNSEC_SOCKET_SERVER
67038+ default 1002
67039+ help
67040+ Here you can choose the GID to disable server socket access for.
67041+ Remember to add the users you want server socket access disabled for to
67042+ the GID specified here. If the sysctl option is enabled, a sysctl
67043+ option with name "socket_server_gid" is created.
67044+
67045+endmenu
67046+
67047+menu "Physical Protections"
67048+depends on GRKERNSEC
67049+
67050+config GRKERNSEC_DENYUSB
67051+ bool "Deny new USB connections after toggle"
67052+ default y if GRKERNSEC_CONFIG_AUTO
67053+ depends on SYSCTL && USB_SUPPORT
67054+ help
67055+ If you say Y here, a new sysctl option with name "deny_new_usb"
67056+ will be created. Setting its value to 1 will prevent any new
67057+ USB devices from being recognized by the OS. Any attempted USB
67058+ device insertion will be logged. This option is intended to be
67059+ used against custom USB devices designed to exploit vulnerabilities
67060+ in various USB device drivers.
67061+
67062+ For greatest effectiveness, this sysctl should be set after any
67063+ relevant init scripts. This option is safe to enable in distros
67064+ as each user can choose whether or not to toggle the sysctl.
67065+
67066+config GRKERNSEC_DENYUSB_FORCE
67067+ bool "Reject all USB devices not connected at boot"
67068+ select USB
67069+ depends on GRKERNSEC_DENYUSB
67070+ help
67071+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
67072+ that doesn't involve a sysctl entry. This option should only be
67073+ enabled if you're sure you want to deny all new USB connections
67074+ at runtime and don't want to modify init scripts. This should not
67075+ be enabled by distros. It forces the core USB code to be built
67076+ into the kernel image so that all devices connected at boot time
67077+ can be recognized and new USB device connections can be prevented
67078+ prior to init running.
67079+
67080+endmenu
67081+
67082+menu "Sysctl Support"
67083+depends on GRKERNSEC && SYSCTL
67084+
67085+config GRKERNSEC_SYSCTL
67086+ bool "Sysctl support"
67087+ default y if GRKERNSEC_CONFIG_AUTO
67088+ help
67089+ If you say Y here, you will be able to change the options that
67090+ grsecurity runs with at bootup, without having to recompile your
67091+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
67092+ to enable (1) or disable (0) various features. All the sysctl entries
67093+ are mutable until the "grsec_lock" entry is set to a non-zero value.
67094+ All features enabled in the kernel configuration are disabled at boot
67095+ if you do not say Y to the "Turn on features by default" option.
67096+ All options should be set at startup, and the grsec_lock entry should
67097+ be set to a non-zero value after all the options are set.
67098+ *THIS IS EXTREMELY IMPORTANT*
67099+
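To make the recommended startup sequence concrete, a minimal init-time sketch (the entries written besides grsec_lock are examples taken from elsewhere in this section; adapt them to the features actually enabled):

    #include <stdio.h>

    /* Write one value under /proc/sys/kernel/grsecurity/. */
    static int grsec_set(const char *name, int val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/proc/sys/kernel/grsecurity/%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%d\n", val);
        return fclose(f);
    }

    int main(void)
    {
        grsec_set("deter_bruteforce", 1);
        grsec_set("dmesg", 1);
        /* Set grsec_lock LAST: afterwards no entry can change until reboot. */
        return grsec_set("grsec_lock", 1);
    }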
67100+config GRKERNSEC_SYSCTL_DISTRO
67101+ bool "Extra sysctl support for distro makers (READ HELP)"
67102+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
67103+ help
67104+ If you say Y here, additional sysctl options will be created
67105+ for features that affect processes running as root. Therefore,
67106+ it is critical when using this option that the grsec_lock entry be
67107+ enabled after boot. Only distros that ship prebuilt kernel packages
67108+ with this option enabled and that can ensure grsec_lock is set
67109+ after boot should use this option.
67110+ *Failure to set grsec_lock after boot makes all grsec features
67111+ this option covers useless*
67112+
67113+ Currently this option creates the following sysctl entries:
67114+ "Disable Privileged I/O": "disable_priv_io"
67115+
67116+config GRKERNSEC_SYSCTL_ON
67117+ bool "Turn on features by default"
67118+ default y if GRKERNSEC_CONFIG_AUTO
67119+ depends on GRKERNSEC_SYSCTL
67120+ help
67121+ If you say Y here, instead of having all features enabled in the
67122+ kernel configuration disabled at boot time, the features will be
67123+ enabled at boot time. It is recommended you say Y here unless
67124+ there is some reason you would want all sysctl-tunable features to
67125+ be disabled by default. As mentioned elsewhere, it is important
67126+ to enable the grsec_lock entry once you have finished modifying
67127+ the sysctl entries.
67128+
67129+endmenu
67130+menu "Logging Options"
67131+depends on GRKERNSEC
67132+
67133+config GRKERNSEC_FLOODTIME
67134+ int "Seconds in between log messages (minimum)"
67135+ default 10
67136+ help
67137+ This option allows you to enforce the number of seconds between
67138+ grsecurity log messages. The default should be suitable for most
67139+ people; however, if you choose to change it, choose a value small enough
67140+ to allow informative logs to be produced, but large enough to
67141+ prevent flooding.
67142+
67143+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
67144+ any rate limiting on grsecurity log messages.
67145+
67146+config GRKERNSEC_FLOODBURST
67147+ int "Number of messages in a burst (maximum)"
67148+ default 6
67149+ help
67150+ This option allows you to choose the maximum number of messages allowed
67151+ within the flood time interval you chose in a separate option. The
67152+ default should be suitable for most people; however, if you find that
67153+ many of your logs are being interpreted as flooding, you may want to
67154+ raise this value.
67155+
67156+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
67157+ any rate limiting on grsecurity log messages.
67158+
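A sketch of how the two knobs interact, illustrating the described behavior rather than the patch's implementation:

    #include <time.h>

    /* Allow at most `floodburst` messages per `floodtime`-second window;
     * setting both knobs to 0 disables rate limiting entirely. */
    static int grsec_log_allowed(time_t now, int floodtime, int floodburst)
    {
        static time_t window_start;
        static int count;

        if (floodtime == 0 && floodburst == 0)
            return 1;
        if (now - window_start >= floodtime) {
            window_start = now;
            count = 0;
        }
        if (count >= floodburst)
            return 0;
        count++;
        return 1;
    }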
67159+endmenu
67160diff --git a/grsecurity/Makefile b/grsecurity/Makefile
67161new file mode 100644
67162index 0000000..30ababb
67163--- /dev/null
67164+++ b/grsecurity/Makefile
67165@@ -0,0 +1,54 @@
67166+# grsecurity - access control and security hardening for Linux
67167+# All code in this directory and various hooks located throughout the Linux kernel are
67168+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
67169+# http://www.grsecurity.net spender@grsecurity.net
67170+#
67171+# This program is free software; you can redistribute it and/or
67172+# modify it under the terms of the GNU General Public License version 2
67173+# as published by the Free Software Foundation.
67174+#
67175+# This program is distributed in the hope that it will be useful,
67176+# but WITHOUT ANY WARRANTY; without even the implied warranty of
67177+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
67178+# GNU General Public License for more details.
67179+#
67180+# You should have received a copy of the GNU General Public License
67181+# along with this program; if not, write to the Free Software
67182+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
67183+
67184+KBUILD_CFLAGS += -Werror
67185+
67186+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
67187+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
67188+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
67189+ grsec_usb.o grsec_ipc.o grsec_proc.o
67190+
67191+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
67192+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
67193+ gracl_learn.o grsec_log.o gracl_policy.o
67194+ifdef CONFIG_COMPAT
67195+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
67196+endif
67197+
67198+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
67199+
67200+ifdef CONFIG_NET
67201+obj-y += grsec_sock.o
67202+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
67203+endif
67204+
67205+ifndef CONFIG_GRKERNSEC
67206+obj-y += grsec_disabled.o
67207+endif
67208+
67209+ifdef CONFIG_GRKERNSEC_HIDESYM
67210+extra-y := grsec_hidesym.o
67211+$(obj)/grsec_hidesym.o:
67212+ @-chmod -f 500 /boot
67213+ @-chmod -f 500 /lib/modules
67214+ @-chmod -f 500 /lib64/modules
67215+ @-chmod -f 500 /lib32/modules
67216+ @-chmod -f 700 .
67217+ @-chmod -f 700 $(objtree)
67218+ @echo ' grsec: protected kernel image paths'
67219+endif
67220diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
67221new file mode 100644
67222index 0000000..e56396f
67223--- /dev/null
67224+++ b/grsecurity/gracl.c
67225@@ -0,0 +1,2679 @@
67226+#include <linux/kernel.h>
67227+#include <linux/module.h>
67228+#include <linux/sched.h>
67229+#include <linux/mm.h>
67230+#include <linux/file.h>
67231+#include <linux/fs.h>
67232+#include <linux/namei.h>
67233+#include <linux/mount.h>
67234+#include <linux/tty.h>
67235+#include <linux/proc_fs.h>
67236+#include <linux/lglock.h>
67237+#include <linux/slab.h>
67238+#include <linux/vmalloc.h>
67239+#include <linux/types.h>
67240+#include <linux/sysctl.h>
67241+#include <linux/netdevice.h>
67242+#include <linux/ptrace.h>
67243+#include <linux/gracl.h>
67244+#include <linux/gralloc.h>
67245+#include <linux/security.h>
67246+#include <linux/grinternal.h>
67247+#include <linux/pid_namespace.h>
67248+#include <linux/stop_machine.h>
67249+#include <linux/fdtable.h>
67250+#include <linux/percpu.h>
67252+#include <linux/hugetlb.h>
67253+#include <linux/posix-timers.h>
67254+#include <linux/prefetch.h>
67255+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67256+#include <linux/magic.h>
67257+#include <linux/pagemap.h>
67258+#include "../fs/btrfs/async-thread.h"
67259+#include "../fs/btrfs/ctree.h"
67260+#include "../fs/btrfs/btrfs_inode.h"
67261+#endif
67262+#include "../fs/mount.h"
67263+
67264+#include <asm/uaccess.h>
67265+#include <asm/errno.h>
67266+#include <asm/mman.h>
67267+
67268+#define FOR_EACH_ROLE_START(role) \
67269+ role = running_polstate.role_list; \
67270+ while (role) {
67271+
67272+#define FOR_EACH_ROLE_END(role) \
67273+ role = role->prev; \
67274+ }
67275+
67276+extern struct path gr_real_root;
67277+
67278+static struct gr_policy_state running_polstate;
67279+struct gr_policy_state *polstate = &running_polstate;
67280+extern struct gr_alloc_state *current_alloc_state;
67281+
67282+extern char *gr_shared_page[4];
67283+DEFINE_RWLOCK(gr_inode_lock);
67284+
67285+static unsigned int gr_status __read_only = GR_STATUS_INIT;
67286+
67287+#ifdef CONFIG_NET
67288+extern struct vfsmount *sock_mnt;
67289+#endif
67290+
67291+extern struct vfsmount *pipe_mnt;
67292+extern struct vfsmount *shm_mnt;
67293+
67294+#ifdef CONFIG_HUGETLBFS
67295+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
67296+#endif
67297+
67298+extern u16 acl_sp_role_value;
67299+extern struct acl_object_label *fakefs_obj_rw;
67300+extern struct acl_object_label *fakefs_obj_rwx;
67301+
67302+int gr_acl_is_enabled(void)
67303+{
67304+ return (gr_status & GR_READY);
67305+}
67306+
67307+void gr_enable_rbac_system(void)
67308+{
67309+ pax_open_kernel();
67310+ gr_status |= GR_READY;
67311+ pax_close_kernel();
67312+}
67313+
67314+int gr_rbac_disable(void *unused)
67315+{
67316+ pax_open_kernel();
67317+ gr_status &= ~GR_READY;
67318+ pax_close_kernel();
67319+
67320+ return 0;
67321+}
67322+
67323+static inline dev_t __get_dev(const struct dentry *dentry)
67324+{
67325+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
67326+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
67327+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
67328+ else
67329+#endif
67330+ return dentry->d_sb->s_dev;
67331+}
67332+
67333+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
67334+{
67335+ return __get_dev(dentry);
67336+}
67337+
67338+static char gr_task_roletype_to_char(struct task_struct *task)
67339+{
67340+ switch (task->role->roletype &
67341+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
67342+ GR_ROLE_SPECIAL)) {
67343+ case GR_ROLE_DEFAULT:
67344+ return 'D';
67345+ case GR_ROLE_USER:
67346+ return 'U';
67347+ case GR_ROLE_GROUP:
67348+ return 'G';
67349+ case GR_ROLE_SPECIAL:
67350+ return 'S';
67351+ }
67352+
67353+ return 'X';
67354+}
67355+
67356+char gr_roletype_to_char(void)
67357+{
67358+ return gr_task_roletype_to_char(current);
67359+}
67360+
67361+__inline__ int
67362+gr_acl_tpe_check(void)
67363+{
67364+ if (unlikely(!(gr_status & GR_READY)))
67365+ return 0;
67366+ if (current->role->roletype & GR_ROLE_TPE)
67367+ return 1;
67368+ else
67369+ return 0;
67370+}
67371+
67372+int
67373+gr_handle_rawio(const struct inode *inode)
67374+{
67375+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
67376+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
67377+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
67378+ !capable(CAP_SYS_RAWIO))
67379+ return 1;
67380+#endif
67381+ return 0;
67382+}
67383+
67384+int
67385+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
67386+{
67387+ if (likely(lena != lenb))
67388+ return 0;
67389+
67390+ return !memcmp(a, b, lena);
67391+}
67392+
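+/* Pathname construction helpers: prepend() builds the string right to
+ * left, moving *buffer and *buflen back by namelen and copying str into
+ * the space freed, failing with -ENAMETOOLONG once the scratch buffer
+ * is exhausted. */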
67393+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
67394+{
67395+ *buflen -= namelen;
67396+ if (*buflen < 0)
67397+ return -ENAMETOOLONG;
67398+ *buffer -= namelen;
67399+ memcpy(*buffer, str, namelen);
67400+ return 0;
67401+}
67402+
67403+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
67404+{
67405+ return prepend(buffer, buflen, name->name, name->len);
67406+}
67407+
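+/* Walk dentry/vfsmount pairs upward from path toward root, prepending
+ * one "/name" component per iteration; on reaching a mount root the walk
+ * continues from the mountpoint in the parent mount, mirroring the VFS
+ * d_path() logic. */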
67408+static int prepend_path(const struct path *path, struct path *root,
67409+ char **buffer, int *buflen)
67410+{
67411+ struct dentry *dentry = path->dentry;
67412+ struct vfsmount *vfsmnt = path->mnt;
67413+ struct mount *mnt = real_mount(vfsmnt);
67414+ bool slash = false;
67415+ int error = 0;
67416+
67417+ while (dentry != root->dentry || vfsmnt != root->mnt) {
67418+ struct dentry * parent;
67419+
67420+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
67421+ /* Global root? */
67422+ if (!mnt_has_parent(mnt)) {
67423+ goto out;
67424+ }
67425+ dentry = mnt->mnt_mountpoint;
67426+ mnt = mnt->mnt_parent;
67427+ vfsmnt = &mnt->mnt;
67428+ continue;
67429+ }
67430+ parent = dentry->d_parent;
67431+ prefetch(parent);
67432+ spin_lock(&dentry->d_lock);
67433+ error = prepend_name(buffer, buflen, &dentry->d_name);
67434+ spin_unlock(&dentry->d_lock);
67435+ if (!error)
67436+ error = prepend(buffer, buflen, "/", 1);
67437+ if (error)
67438+ break;
67439+
67440+ slash = true;
67441+ dentry = parent;
67442+ }
67443+
67444+out:
67445+ if (!error && !slash)
67446+ error = prepend(buffer, buflen, "/", 1);
67447+
67448+ return error;
67449+}
67450+
67451+/* this must be called with mount_lock and rename_lock held */
67452+
67453+static char *__our_d_path(const struct path *path, struct path *root,
67454+ char *buf, int buflen)
67455+{
67456+ char *res = buf + buflen;
67457+ int error;
67458+
67459+ prepend(&res, &buflen, "\0", 1);
67460+ error = prepend_path(path, root, &res, &buflen);
67461+ if (error)
67462+ return ERR_PTR(error);
67463+
67464+ return res;
67465+}
67466+
67467+static char *
67468+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
67469+{
67470+ char *retval;
67471+
67472+ retval = __our_d_path(path, root, buf, buflen);
67473+ if (unlikely(IS_ERR(retval)))
67474+ retval = strcpy(buf, "<path too long>");
67475+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
67476+ retval[1] = '\0';
67477+
67478+ return retval;
67479+}
67480+
67481+static char *
67482+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
67483+ char *buf, int buflen)
67484+{
67485+ struct path path;
67486+ char *res;
67487+
67488+ path.dentry = (struct dentry *)dentry;
67489+ path.mnt = (struct vfsmount *)vfsmnt;
67490+
67491+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
67492+ by the RBAC system */
67493+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
67494+
67495+ return res;
67496+}
67497+
67498+static char *
67499+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
67500+ char *buf, int buflen)
67501+{
67502+ char *res;
67503+ struct path path;
67504+ struct path root;
67505+ struct task_struct *reaper = init_pid_ns.child_reaper;
67506+
67507+ path.dentry = (struct dentry *)dentry;
67508+ path.mnt = (struct vfsmount *)vfsmnt;
67509+
67510+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
67511+ get_fs_root(reaper->fs, &root);
67512+
67513+ read_seqlock_excl(&mount_lock);
67514+ write_seqlock(&rename_lock);
67515+ res = gen_full_path(&path, &root, buf, buflen);
67516+ write_sequnlock(&rename_lock);
67517+ read_sequnlock_excl(&mount_lock);
67518+
67519+ path_put(&root);
67520+ return res;
67521+}
67522+
67523+char *
67524+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
67525+{
67526+ char *ret;
67527+ read_seqlock_excl(&mount_lock);
67528+ write_seqlock(&rename_lock);
67529+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
67530+ PAGE_SIZE);
67531+ write_sequnlock(&rename_lock);
67532+ read_sequnlock_excl(&mount_lock);
67533+ return ret;
67534+}
67535+
67536+static char *
67537+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
67538+{
67539+ char *ret;
67540+ char *buf;
67541+ int buflen;
67542+
67543+ read_seqlock_excl(&mount_lock);
67544+ write_seqlock(&rename_lock);
67545+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
67546+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
67547+ buflen = (int)(ret - buf);
67548+ if (buflen >= 5)
67549+ prepend(&ret, &buflen, "/proc", 5);
67550+ else
67551+ ret = strcpy(buf, "<path too long>");
67552+ write_sequnlock(&rename_lock);
67553+ read_sequnlock_excl(&mount_lock);
67554+ return ret;
67555+}
67556+
67557+char *
67558+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
67559+{
67560+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
67561+ PAGE_SIZE);
67562+}
67563+
67564+char *
67565+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
67566+{
67567+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
67568+ PAGE_SIZE);
67569+}
67570+
67571+char *
67572+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
67573+{
67574+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
67575+ PAGE_SIZE);
67576+}
67577+
67578+char *
67579+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
67580+{
67581+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
67582+ PAGE_SIZE);
67583+}
67584+
67585+char *
67586+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
67587+{
67588+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
67589+ PAGE_SIZE);
67590+}
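
gr_to_filename() through gr_to_filename3() differ only in which per-CPU scratch page they format into. Keeping several distinct pages appears to exist so that a single audit message can reference several paths at once, each formatted into its own buffer, without a later call overwriting an earlier result; callers run with preemption disabled so smp_processor_id() stays stable. A userspace sketch of why distinct scratch buffers matter (all names here are hypothetical, not the patch's helpers):

#include <stdio.h>

static char page0[4096], page1[4096];

/* Format a path into the given scratch buffer and hand it back. */
static char *fmt_path(char *page, const char *path)
{
	snprintf(page, sizeof(page0), "%s", path);
	return page;
}

int main(void)
{
	/* Two distinct pages keep both strings live for one printf;
	   reusing a single buffer would make both arguments print
	   the same (second) path. */
	printf("rename %s -> %s\n",
	       fmt_path(page0, "/etc/passwd-"),
	       fmt_path(page1, "/etc/passwd"));
	return 0;
}
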
67591+
67592+__inline__ __u32
67593+to_gr_audit(const __u32 reqmode)
67594+{
67595+	/* masks off the audit flags, shifts the remaining permission
67596+	   flags into their audit-flag positions, and adds the special
67597+	   case of append auditing if write was requested */
67598+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
67599+}
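
A worked example of the shift, assuming the flag layout used elsewhere in this patch, where each GR_AUDIT_* bit sits exactly ten bits above its GR_* counterpart; the values below are quoted for illustration and should be checked against the patch's own flag definitions:

#include <assert.h>
#include <stdint.h>

#define GR_READ		0x00000001u
#define GR_APPEND	0x00000002u
#define GR_WRITE	0x00000004u
#define GR_AUDIT_READ	0x00000400u	/* GR_READ   << 10 */
#define GR_AUDIT_APPEND	0x00000800u	/* GR_APPEND << 10 */
#define GR_AUDIT_WRITE	0x00001000u	/* GR_WRITE  << 10 */
#define GR_AUDITS	0x000ffc00u	/* all audit bits */

static uint32_t to_gr_audit(uint32_t reqmode)
{
	return ((reqmode & ~GR_AUDITS) << 10) |
	       ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0);
}

int main(void)
{
	/* A read-write request audits read, write, and (because a
	   writer can always append) append as well. */
	assert(to_gr_audit(GR_READ | GR_WRITE) ==
	       (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND));
	return 0;
}
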
67600+
67601+struct acl_role_label *
67602+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
67603+ const gid_t gid)
67604+{
67605+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
67606+ struct acl_role_label *match;
67607+ struct role_allowed_ip *ipp;
67608+ unsigned int x;
67609+ u32 curr_ip = task->signal->saved_ip;
67610+
67611+ match = state->acl_role_set.r_hash[index];
67612+
67613+ while (match) {
67614+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
67615+ for (x = 0; x < match->domain_child_num; x++) {
67616+ if (match->domain_children[x] == uid)
67617+ goto found;
67618+ }
67619+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
67620+ break;
67621+ match = match->next;
67622+ }
67623+found:
67624+ if (match == NULL) {
67625+ try_group:
67626+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
67627+ match = state->acl_role_set.r_hash[index];
67628+
67629+ while (match) {
67630+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
67631+ for (x = 0; x < match->domain_child_num; x++) {
67632+ if (match->domain_children[x] == gid)
67633+ goto found2;
67634+ }
67635+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
67636+ break;
67637+ match = match->next;
67638+ }
67639+found2:
67640+ if (match == NULL)
67641+ match = state->default_role;
67642+ if (match->allowed_ips == NULL)
67643+ return match;
67644+ else {
67645+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
67646+ if (likely
67647+ ((ntohl(curr_ip) & ipp->netmask) ==
67648+ (ntohl(ipp->addr) & ipp->netmask)))
67649+ return match;
67650+ }
67651+ match = state->default_role;
67652+ }
67653+ } else if (match->allowed_ips == NULL) {
67654+ return match;
67655+ } else {
67656+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
67657+ if (likely
67658+ ((ntohl(curr_ip) & ipp->netmask) ==
67659+ (ntohl(ipp->addr) & ipp->netmask)))
67660+ return match;
67661+ }
67662+ goto try_group;
67663+ }
67664+
67665+ return match;
67666+}
67667+
67668+static struct acl_role_label *
67669+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
67670+ const gid_t gid)
67671+{
67672+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
67673+}
67674+
67675+struct acl_subject_label *
67676+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
67677+ const struct acl_role_label *role)
67678+{
67679+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
67680+ struct acl_subject_label *match;
67681+
67682+ match = role->subj_hash[index];
67683+
67684+ while (match && (match->inode != ino || match->device != dev ||
67685+ (match->mode & GR_DELETED))) {
67686+ match = match->next;
67687+ }
67688+
67689+ if (match && !(match->mode & GR_DELETED))
67690+ return match;
67691+ else
67692+ return NULL;
67693+}
67694+
67695+struct acl_subject_label *
67696+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
67697+ const struct acl_role_label *role)
67698+{
67699+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
67700+ struct acl_subject_label *match;
67701+
67702+ match = role->subj_hash[index];
67703+
67704+ while (match && (match->inode != ino || match->device != dev ||
67705+ !(match->mode & GR_DELETED))) {
67706+ match = match->next;
67707+ }
67708+
67709+ if (match && (match->mode & GR_DELETED))
67710+ return match;
67711+ else
67712+ return NULL;
67713+}
67714+
67715+static struct acl_object_label *
67716+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
67717+ const struct acl_subject_label *subj)
67718+{
67719+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
67720+ struct acl_object_label *match;
67721+
67722+ match = subj->obj_hash[index];
67723+
67724+ while (match && (match->inode != ino || match->device != dev ||
67725+ (match->mode & GR_DELETED))) {
67726+ match = match->next;
67727+ }
67728+
67729+ if (match && !(match->mode & GR_DELETED))
67730+ return match;
67731+ else
67732+ return NULL;
67733+}
67734+
67735+static struct acl_object_label *
67736+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
67737+ const struct acl_subject_label *subj)
67738+{
67739+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
67740+ struct acl_object_label *match;
67741+
67742+ match = subj->obj_hash[index];
67743+
67744+ while (match && (match->inode != ino || match->device != dev ||
67745+ !(match->mode & GR_DELETED))) {
67746+ match = match->next;
67747+ }
67748+
67749+ if (match && (match->mode & GR_DELETED))
67750+ return match;
67751+
67752+ match = subj->obj_hash[index];
67753+
67754+ while (match && (match->inode != ino || match->device != dev ||
67755+ (match->mode & GR_DELETED))) {
67756+ match = match->next;
67757+ }
67758+
67759+ if (match && !(match->mode & GR_DELETED))
67760+ return match;
67761+ else
67762+ return NULL;
67763+}
67764+
67765+struct name_entry *
67766+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
67767+{
67768+ unsigned int len = strlen(name);
67769+ unsigned int key = full_name_hash(name, len);
67770+ unsigned int index = key % state->name_set.n_size;
67771+ struct name_entry *match;
67772+
67773+ match = state->name_set.n_hash[index];
67774+
67775+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
67776+ match = match->next;
67777+
67778+ return match;
67779+}
67780+
67781+static struct name_entry *
67782+lookup_name_entry(const char *name)
67783+{
67784+ return __lookup_name_entry(&running_polstate, name);
67785+}
67786+
67787+static struct name_entry *
67788+lookup_name_entry_create(const char *name)
67789+{
67790+ unsigned int len = strlen(name);
67791+ unsigned int key = full_name_hash(name, len);
67792+ unsigned int index = key % running_polstate.name_set.n_size;
67793+ struct name_entry *match;
67794+
67795+ match = running_polstate.name_set.n_hash[index];
67796+
67797+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
67798+ !match->deleted))
67799+ match = match->next;
67800+
67801+ if (match && match->deleted)
67802+ return match;
67803+
67804+ match = running_polstate.name_set.n_hash[index];
67805+
67806+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
67807+ match->deleted))
67808+ match = match->next;
67809+
67810+ if (match && !match->deleted)
67811+ return match;
67812+ else
67813+ return NULL;
67814+}
67815+
67816+static struct inodev_entry *
67817+lookup_inodev_entry(const ino_t ino, const dev_t dev)
67818+{
67819+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
67820+ struct inodev_entry *match;
67821+
67822+ match = running_polstate.inodev_set.i_hash[index];
67823+
67824+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
67825+ match = match->next;
67826+
67827+ return match;
67828+}
67829+
67830+void
67831+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
67832+{
67833+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
67834+ state->inodev_set.i_size);
67835+ struct inodev_entry **curr;
67836+
67837+ entry->prev = NULL;
67838+
67839+ curr = &state->inodev_set.i_hash[index];
67840+ if (*curr != NULL)
67841+ (*curr)->prev = entry;
67842+
67843+ entry->next = *curr;
67844+ *curr = entry;
67845+
67846+ return;
67847+}
67848+
67849+static void
67850+insert_inodev_entry(struct inodev_entry *entry)
67851+{
67852+ __insert_inodev_entry(&running_polstate, entry);
67853+}
67854+
67855+void
67856+insert_acl_obj_label(struct acl_object_label *obj,
67857+ struct acl_subject_label *subj)
67858+{
67859+ unsigned int index =
67860+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
67861+ struct acl_object_label **curr;
67862+
67863+ obj->prev = NULL;
67864+
67865+ curr = &subj->obj_hash[index];
67866+ if (*curr != NULL)
67867+ (*curr)->prev = obj;
67868+
67869+ obj->next = *curr;
67870+ *curr = obj;
67871+
67872+ return;
67873+}
67874+
67875+void
67876+insert_acl_subj_label(struct acl_subject_label *obj,
67877+ struct acl_role_label *role)
67878+{
67879+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
67880+ struct acl_subject_label **curr;
67881+
67882+ obj->prev = NULL;
67883+
67884+ curr = &role->subj_hash[index];
67885+ if (*curr != NULL)
67886+ (*curr)->prev = obj;
67887+
67888+ obj->next = *curr;
67889+ *curr = obj;
67890+
67891+ return;
67892+}
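
insert_acl_obj_label(), insert_acl_subj_label(), and __insert_inodev_entry() above are the same operation on three types: an O(1) push onto the head of a doubly linked hash chain. The prev pointer is what lets the update_*/delete paths later in this file unlink an entry in O(1) without rescanning the bucket. A generic sketch under hypothetical names:

#include <stddef.h>

struct node {
	struct node *prev, *next;
	unsigned long key;
};

/* Push n onto the head of the bucket; O(1). */
static void bucket_insert(struct node **head, struct node *n)
{
	n->prev = NULL;
	if (*head != NULL)
		(*head)->prev = n;
	n->next = *head;
	*head = n;
}

/* Remove n from the bucket; O(1) thanks to the prev link. */
static void bucket_unlink(struct node **head, struct node *n)
{
	if (n->prev == NULL)
		*head = n->next;
	else
		n->prev->next = n->next;
	if (n->next != NULL)
		n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

int main(void)
{
	struct node a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };
	struct node *bucket = NULL;

	bucket_insert(&bucket, &a);
	bucket_insert(&bucket, &b);	/* bucket: b -> a */
	bucket_unlink(&bucket, &b);	/* bucket: a */
	return bucket == &a ? 0 : 1;
}
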
67893+
67894+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
67895+
67896+static int
67897+glob_match(const char *p, const char *n)
67898+{
67899+ char c;
67900+
67901+ while ((c = *p++) != '\0') {
67902+ switch (c) {
67903+ case '?':
67904+ if (*n == '\0')
67905+ return 1;
67906+ else if (*n == '/')
67907+ return 1;
67908+ break;
67909+ case '\\':
67910+ if (*n != c)
67911+ return 1;
67912+ break;
67913+ case '*':
67914+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
67915+ if (*n == '/')
67916+ return 1;
67917+ else if (c == '?') {
67918+ if (*n == '\0')
67919+ return 1;
67920+ else
67921+ ++n;
67922+ }
67923+ }
67924+ if (c == '\0') {
67925+ return 0;
67926+ } else {
67927+ const char *endp;
67928+
67929+ if ((endp = strchr(n, '/')) == NULL)
67930+ endp = n + strlen(n);
67931+
67932+ if (c == '[') {
67933+ for (--p; n < endp; ++n)
67934+ if (!glob_match(p, n))
67935+ return 0;
67936+ } else if (c == '/') {
67937+ while (*n != '\0' && *n != '/')
67938+ ++n;
67939+ if (*n == '/' && !glob_match(p, n + 1))
67940+ return 0;
67941+ } else {
67942+ for (--p; n < endp; ++n)
67943+ if (*n == c && !glob_match(p, n))
67944+ return 0;
67945+ }
67946+
67947+ return 1;
67948+ }
67949+ case '[':
67950+ {
67951+ int not;
67952+ char cold;
67953+
67954+ if (*n == '\0' || *n == '/')
67955+ return 1;
67956+
67957+ not = (*p == '!' || *p == '^');
67958+ if (not)
67959+ ++p;
67960+
67961+ c = *p++;
67962+ for (;;) {
67963+ unsigned char fn = (unsigned char)*n;
67964+
67965+ if (c == '\0')
67966+ return 1;
67967+ else {
67968+ if (c == fn)
67969+ goto matched;
67970+ cold = c;
67971+ c = *p++;
67972+
67973+ if (c == '-' && *p != ']') {
67974+ unsigned char cend = *p++;
67975+
67976+ if (cend == '\0')
67977+ return 1;
67978+
67979+ if (cold <= fn && fn <= cend)
67980+ goto matched;
67981+
67982+ c = *p++;
67983+ }
67984+ }
67985+
67986+ if (c == ']')
67987+ break;
67988+ }
67989+ if (!not)
67990+ return 1;
67991+ break;
67992+ matched:
67993+ while (c != ']') {
67994+ if (c == '\0')
67995+ return 1;
67996+
67997+ c = *p++;
67998+ }
67999+ if (not)
68000+ return 1;
68001+ }
68002+ break;
68003+ default:
68004+ if (c != *n)
68005+ return 1;
68006+ }
68007+
68008+ ++n;
68009+ }
68010+
68011+ if (*n == '\0')
68012+ return 0;
68013+
68014+ if (*n == '/')
68015+ return 0;
68016+
68017+ return 1;
68018+}
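
Note the strcmp-style return convention: 0 means the pattern matched, 1 means it did not. As in fnmatch(3) with FNM_PATHNAME, '*', '?', and bracket sets never match across a '/'. A userspace illustration of the intended semantics using libc's fnmatch, which approximates glob_match() but is not a drop-in equivalent:

#include <assert.h>
#include <fnmatch.h>

int main(void)
{
	/* 0 means match, as with glob_match() above */
	assert(fnmatch("/home/*/.ssh", "/home/alice/.ssh", FNM_PATHNAME) == 0);

	/* '*' does not cross a path separator */
	assert(fnmatch("/home/*", "/home/alice/.ssh", FNM_PATHNAME) != 0);

	/* '?' matches a single non-'/' character */
	assert(fnmatch("/tmp/file?", "/tmp/file1", FNM_PATHNAME) == 0);
	return 0;
}
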
68019+
68020+static struct acl_object_label *
68021+chk_glob_label(struct acl_object_label *globbed,
68022+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
68023+{
68024+ struct acl_object_label *tmp;
68025+
68026+ if (*path == NULL)
68027+ *path = gr_to_filename_nolock(dentry, mnt);
68028+
68029+ tmp = globbed;
68030+
68031+ while (tmp) {
68032+ if (!glob_match(tmp->filename, *path))
68033+ return tmp;
68034+ tmp = tmp->next;
68035+ }
68036+
68037+ return NULL;
68038+}
68039+
68040+static struct acl_object_label *
68041+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
68042+ const ino_t curr_ino, const dev_t curr_dev,
68043+ const struct acl_subject_label *subj, char **path, const int checkglob)
68044+{
68045+ struct acl_subject_label *tmpsubj;
68046+ struct acl_object_label *retval;
68047+ struct acl_object_label *retval2;
68048+
68049+ tmpsubj = (struct acl_subject_label *) subj;
68050+ read_lock(&gr_inode_lock);
68051+ do {
68052+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
68053+ if (retval) {
68054+ if (checkglob && retval->globbed) {
68055+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
68056+ if (retval2)
68057+ retval = retval2;
68058+ }
68059+ break;
68060+ }
68061+ } while ((tmpsubj = tmpsubj->parent_subject));
68062+ read_unlock(&gr_inode_lock);
68063+
68064+ return retval;
68065+}
68066+
68067+static __inline__ struct acl_object_label *
68068+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
68069+ struct dentry *curr_dentry,
68070+ const struct acl_subject_label *subj, char **path, const int checkglob)
68071+{
68072+ int newglob = checkglob;
68073+ ino_t inode;
68074+ dev_t device;
68075+
68076+	/* while we're still on the original dentry (not yet on an ancestor), skip glob
68077+	   checking: we don't want a "/ *"-style rule to match instead of the exact "/" object.
68078+	   create lookups that call this function are an exception, though: they look up on
68079+	   the parent and thus need globbing checks on all paths
68080+	*/
68081+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
68082+ newglob = GR_NO_GLOB;
68083+
68084+ spin_lock(&curr_dentry->d_lock);
68085+ inode = curr_dentry->d_inode->i_ino;
68086+ device = __get_dev(curr_dentry);
68087+ spin_unlock(&curr_dentry->d_lock);
68088+
68089+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
68090+}
68091+
68092+#ifdef CONFIG_HUGETLBFS
68093+static inline bool
68094+is_hugetlbfs_mnt(const struct vfsmount *mnt)
68095+{
68096+ int i;
68097+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
68098+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
68099+ return true;
68100+ }
68101+
68102+ return false;
68103+}
68104+#endif
68105+
68106+static struct acl_object_label *
68107+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
68108+ const struct acl_subject_label *subj, char *path, const int checkglob)
68109+{
68110+ struct dentry *dentry = (struct dentry *) l_dentry;
68111+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
68112+ struct mount *real_mnt = real_mount(mnt);
68113+ struct acl_object_label *retval;
68114+ struct dentry *parent;
68115+
68116+ read_seqlock_excl(&mount_lock);
68117+ write_seqlock(&rename_lock);
68118+
68119+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
68120+#ifdef CONFIG_NET
68121+ mnt == sock_mnt ||
68122+#endif
68123+#ifdef CONFIG_HUGETLBFS
68124+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
68125+#endif
68126+		   /* ignore IS_PRIVATE (fs-internal) inodes */
68127+ IS_PRIVATE(l_dentry->d_inode))) {
68128+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
68129+ goto out;
68130+ }
68131+
68132+ for (;;) {
68133+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
68134+ break;
68135+
68136+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
68137+ if (!mnt_has_parent(real_mnt))
68138+ break;
68139+
68140+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
68141+ if (retval != NULL)
68142+ goto out;
68143+
68144+ dentry = real_mnt->mnt_mountpoint;
68145+ real_mnt = real_mnt->mnt_parent;
68146+ mnt = &real_mnt->mnt;
68147+ continue;
68148+ }
68149+
68150+ parent = dentry->d_parent;
68151+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
68152+ if (retval != NULL)
68153+ goto out;
68154+
68155+ dentry = parent;
68156+ }
68157+
68158+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
68159+
68160+ /* gr_real_root is pinned so we don't have to hold a reference */
68161+ if (retval == NULL)
68162+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
68163+out:
68164+ write_sequnlock(&rename_lock);
68165+ read_sequnlock_excl(&mount_lock);
68166+
68167+ BUG_ON(retval == NULL);
68168+
68169+ return retval;
68170+}
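
The effect of the loop above is longest-prefix matching over the policy: the subject's object hash is consulted for the target itself, then for each ancestor up to the RBAC root, so the most specific configured path wins. A userspace analogue keyed by path strings; the kernel keys by (inode, device) per ancestor instead, and the table and names here are hypothetical:

#include <stdio.h>
#include <string.h>

static const char *policy_lookup(const char *path)
{
	static const struct { const char *path; const char *mode; } table[] = {
		{ "/etc/shadow", "h"   },	/* hidden */
		{ "/etc",        "r"   },
		{ "/",           "rwx" },
	};
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strcmp(table[i].path, path) == 0)
			return table[i].mode;
	return NULL;
}

/* Walk /a/b/c -> /a/b -> /a -> / until a rule matches: the most
   specific ancestor's object label applies. */
static const char *match_object(const char *path)
{
	char buf[256];
	const char *mode;
	char *slash;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	for (;;) {
		mode = policy_lookup(buf);
		if (mode != NULL)
			return mode;
		slash = strrchr(buf, '/');
		if (slash == buf)	/* parent is "/" itself */
			return policy_lookup("/");
		*slash = '\0';
	}
}

int main(void)
{
	printf("%s\n", match_object("/etc/shadow"));	/* h   */
	printf("%s\n", match_object("/etc/hosts"));	/* r   */
	printf("%s\n", match_object("/usr/bin/ls"));	/* rwx */
	return 0;
}
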
68171+
68172+static __inline__ struct acl_object_label *
68173+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
68174+ const struct acl_subject_label *subj)
68175+{
68176+ char *path = NULL;
68177+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
68178+}
68179+
68180+static __inline__ struct acl_object_label *
68181+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
68182+ const struct acl_subject_label *subj)
68183+{
68184+ char *path = NULL;
68185+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
68186+}
68187+
68188+static __inline__ struct acl_object_label *
68189+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
68190+ const struct acl_subject_label *subj, char *path)
68191+{
68192+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
68193+}
68194+
68195+struct acl_subject_label *
68196+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
68197+ const struct acl_role_label *role)
68198+{
68199+ struct dentry *dentry = (struct dentry *) l_dentry;
68200+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
68201+ struct mount *real_mnt = real_mount(mnt);
68202+ struct acl_subject_label *retval;
68203+ struct dentry *parent;
68204+
68205+ read_seqlock_excl(&mount_lock);
68206+ write_seqlock(&rename_lock);
68207+
68208+ for (;;) {
68209+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
68210+ break;
68211+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
68212+ if (!mnt_has_parent(real_mnt))
68213+ break;
68214+
68215+ spin_lock(&dentry->d_lock);
68216+ read_lock(&gr_inode_lock);
68217+ retval =
68218+ lookup_acl_subj_label(dentry->d_inode->i_ino,
68219+ __get_dev(dentry), role);
68220+ read_unlock(&gr_inode_lock);
68221+ spin_unlock(&dentry->d_lock);
68222+ if (retval != NULL)
68223+ goto out;
68224+
68225+ dentry = real_mnt->mnt_mountpoint;
68226+ real_mnt = real_mnt->mnt_parent;
68227+ mnt = &real_mnt->mnt;
68228+ continue;
68229+ }
68230+
68231+ spin_lock(&dentry->d_lock);
68232+ read_lock(&gr_inode_lock);
68233+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
68234+ __get_dev(dentry), role);
68235+ read_unlock(&gr_inode_lock);
68236+ parent = dentry->d_parent;
68237+ spin_unlock(&dentry->d_lock);
68238+
68239+ if (retval != NULL)
68240+ goto out;
68241+
68242+ dentry = parent;
68243+ }
68244+
68245+ spin_lock(&dentry->d_lock);
68246+ read_lock(&gr_inode_lock);
68247+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
68248+ __get_dev(dentry), role);
68249+ read_unlock(&gr_inode_lock);
68250+ spin_unlock(&dentry->d_lock);
68251+
68252+ if (unlikely(retval == NULL)) {
68253+ /* gr_real_root is pinned, we don't need to hold a reference */
68254+ read_lock(&gr_inode_lock);
68255+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
68256+ __get_dev(gr_real_root.dentry), role);
68257+ read_unlock(&gr_inode_lock);
68258+ }
68259+out:
68260+ write_sequnlock(&rename_lock);
68261+ read_sequnlock_excl(&mount_lock);
68262+
68263+ BUG_ON(retval == NULL);
68264+
68265+ return retval;
68266+}
68267+
68268+void
68269+assign_special_role(const char *rolename)
68270+{
68271+ struct acl_object_label *obj;
68272+ struct acl_role_label *r;
68273+ struct acl_role_label *assigned = NULL;
68274+ struct task_struct *tsk;
68275+ struct file *filp;
68276+
68277+ FOR_EACH_ROLE_START(r)
68278+ if (!strcmp(rolename, r->rolename) &&
68279+ (r->roletype & GR_ROLE_SPECIAL)) {
68280+ assigned = r;
68281+ break;
68282+ }
68283+ FOR_EACH_ROLE_END(r)
68284+
68285+ if (!assigned)
68286+ return;
68287+
68288+ read_lock(&tasklist_lock);
68289+ read_lock(&grsec_exec_file_lock);
68290+
68291+ tsk = current->real_parent;
68292+ if (tsk == NULL)
68293+ goto out_unlock;
68294+
68295+ filp = tsk->exec_file;
68296+ if (filp == NULL)
68297+ goto out_unlock;
68298+
68299+ tsk->is_writable = 0;
68300+ tsk->inherited = 0;
68301+
68302+ tsk->acl_sp_role = 1;
68303+ tsk->acl_role_id = ++acl_sp_role_value;
68304+ tsk->role = assigned;
68305+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
68306+
68307+ /* ignore additional mmap checks for processes that are writable
68308+ by the default ACL */
68309+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
68310+ if (unlikely(obj->mode & GR_WRITE))
68311+ tsk->is_writable = 1;
68312+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
68313+ if (unlikely(obj->mode & GR_WRITE))
68314+ tsk->is_writable = 1;
68315+
68316+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68317+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
68318+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
68319+#endif
68320+
68321+out_unlock:
68322+ read_unlock(&grsec_exec_file_lock);
68323+ read_unlock(&tasklist_lock);
68324+ return;
68325+}
68326+
68327+
68328+static void
68329+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
68330+{
68331+ struct task_struct *task = current;
68332+ const struct cred *cred = current_cred();
68333+
68334+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
68335+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
68336+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
68337+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
68338+
68339+ return;
68340+}
68341+
68342+static void
68343+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
68344+{
68345+ struct task_struct *task = current;
68346+ const struct cred *cred = current_cred();
68347+
68348+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
68349+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
68350+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
68351+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
68352+
68353+ return;
68354+}
68355+
68356+static void
68357+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
68358+{
68359+ struct task_struct *task = current;
68360+ const struct cred *cred = current_cred();
68361+
68362+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
68363+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
68364+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
68365+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
68366+
68367+ return;
68368+}
68369+
68370+static void
68371+gr_set_proc_res(struct task_struct *task)
68372+{
68373+ struct acl_subject_label *proc;
68374+ unsigned short i;
68375+
68376+ proc = task->acl;
68377+
68378+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
68379+ return;
68380+
68381+ for (i = 0; i < RLIM_NLIMITS; i++) {
68382+ if (!(proc->resmask & (1U << i)))
68383+ continue;
68384+
68385+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
68386+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
68387+
68388+ if (i == RLIMIT_CPU)
68389+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
68390+ }
68391+
68392+ return;
68393+}
68394+
68395+/* both of the below must be called with
68396+ rcu_read_lock();
68397+ read_lock(&tasklist_lock);
68398+ read_lock(&grsec_exec_file_lock);
68399+*/
68400+
68401+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
68402+{
68403+ char *tmpname;
68404+ struct acl_subject_label *tmpsubj;
68405+ struct file *filp;
68406+ struct name_entry *nmatch;
68407+
68408+ filp = task->exec_file;
68409+ if (filp == NULL)
68410+ return NULL;
68411+
68412+	/* the following applies the correct subject to
68413+	   binaries that were already running when the RBAC
68414+	   system was enabled and that have been replaced
68415+	   or deleted since they were executed
68416+	   -----
68417+	   when the RBAC system starts, the inode/dev recorded
68418+	   in exec_file may be one the RBAC system is unaware
68419+	   of: it only knows the inode/dev of the file
68420+	   currently present on disk, or the absence
68421+	   of it
68422+	*/
68423+
68424+ if (filename)
68425+ nmatch = __lookup_name_entry(state, filename);
68426+ else {
68427+ preempt_disable();
68428+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
68429+
68430+ nmatch = __lookup_name_entry(state, tmpname);
68431+ preempt_enable();
68432+ }
68433+ tmpsubj = NULL;
68434+ if (nmatch) {
68435+ if (nmatch->deleted)
68436+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
68437+ else
68438+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
68439+ }
68440+ /* this also works for the reload case -- if we don't match a potentially inherited subject
68441+ then we fall back to a normal lookup based on the binary's ino/dev
68442+ */
68443+ if (tmpsubj == NULL)
68444+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
68445+
68446+ return tmpsubj;
68447+}
68448+
68449+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
68450+{
68451+ return __gr_get_subject_for_task(&running_polstate, task, filename);
68452+}
68453+
68454+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
68455+{
68456+ struct acl_object_label *obj;
68457+ struct file *filp;
68458+
68459+ filp = task->exec_file;
68460+
68461+ task->acl = subj;
68462+ task->is_writable = 0;
68463+ /* ignore additional mmap checks for processes that are writable
68464+ by the default ACL */
68465+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
68466+ if (unlikely(obj->mode & GR_WRITE))
68467+ task->is_writable = 1;
68468+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
68469+ if (unlikely(obj->mode & GR_WRITE))
68470+ task->is_writable = 1;
68471+
68472+ gr_set_proc_res(task);
68473+
68474+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68475+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
68476+#endif
68477+}
68478+
68479+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
68480+{
68481+ __gr_apply_subject_to_task(&running_polstate, task, subj);
68482+}
68483+
68484+__u32
68485+gr_search_file(const struct dentry * dentry, const __u32 mode,
68486+ const struct vfsmount * mnt)
68487+{
68488+ __u32 retval = mode;
68489+ struct acl_subject_label *curracl;
68490+ struct acl_object_label *currobj;
68491+
68492+ if (unlikely(!(gr_status & GR_READY)))
68493+ return (mode & ~GR_AUDITS);
68494+
68495+ curracl = current->acl;
68496+
68497+ currobj = chk_obj_label(dentry, mnt, curracl);
68498+ retval = currobj->mode & mode;
68499+
68500+ /* if we're opening a specified transfer file for writing
68501+ (e.g. /dev/initctl), then transfer our role to init
68502+ */
68503+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
68504+ current->role->roletype & GR_ROLE_PERSIST)) {
68505+ struct task_struct *task = init_pid_ns.child_reaper;
68506+
68507+ if (task->role != current->role) {
68508+ struct acl_subject_label *subj;
68509+
68510+ task->acl_sp_role = 0;
68511+ task->acl_role_id = current->acl_role_id;
68512+ task->role = current->role;
68513+ rcu_read_lock();
68514+ read_lock(&grsec_exec_file_lock);
68515+ subj = gr_get_subject_for_task(task, NULL);
68516+ gr_apply_subject_to_task(task, subj);
68517+ read_unlock(&grsec_exec_file_lock);
68518+ rcu_read_unlock();
68519+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
68520+ }
68521+ }
68522+
68523+ if (unlikely
68524+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
68525+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
68526+ __u32 new_mode = mode;
68527+
68528+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
68529+
68530+ retval = new_mode;
68531+
68532+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
68533+ new_mode |= GR_INHERIT;
68534+
68535+ if (!(mode & GR_NOLEARN))
68536+ gr_log_learn(dentry, mnt, new_mode);
68537+ }
68538+
68539+ return retval;
68540+}
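
The heart of the check is a bitwise intersection: the object's mode is ANDed with the requested mode, and the caller treats anything less than the full request (ignoring the audit and suppress bits) as a denial. A small illustration with stand-in flag values; the real ones are defined elsewhere in the patch:

#include <assert.h>
#include <stdint.h>

#define GR_READ		0x00000001u	/* stand-in values */
#define GR_WRITE	0x00000004u

int main(void)
{
	uint32_t object_mode = GR_READ;			/* policy grants read only */
	uint32_t requested   = GR_READ | GR_WRITE;	/* open(..., O_RDWR) */
	uint32_t granted     = object_mode & requested;

	/* denied: the grant does not cover the whole request */
	assert(granted != requested);
	return 0;
}
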
68541+
68542+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
68543+ const struct dentry *parent,
68544+ const struct vfsmount *mnt)
68545+{
68546+ struct name_entry *match;
68547+ struct acl_object_label *matchpo;
68548+ struct acl_subject_label *curracl;
68549+ char *path;
68550+
68551+ if (unlikely(!(gr_status & GR_READY)))
68552+ return NULL;
68553+
68554+ preempt_disable();
68555+ path = gr_to_filename_rbac(new_dentry, mnt);
68556+ match = lookup_name_entry_create(path);
68557+
68558+ curracl = current->acl;
68559+
68560+ if (match) {
68561+ read_lock(&gr_inode_lock);
68562+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
68563+ read_unlock(&gr_inode_lock);
68564+
68565+ if (matchpo) {
68566+ preempt_enable();
68567+ return matchpo;
68568+ }
68569+ }
68570+
68571+ // lookup parent
68572+
68573+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
68574+
68575+ preempt_enable();
68576+ return matchpo;
68577+}
68578+
68579+__u32
68580+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
68581+ const struct vfsmount * mnt, const __u32 mode)
68582+{
68583+ struct acl_object_label *matchpo;
68584+ __u32 retval;
68585+
68586+ if (unlikely(!(gr_status & GR_READY)))
68587+ return (mode & ~GR_AUDITS);
68588+
68589+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
68590+
68591+ retval = matchpo->mode & mode;
68592+
68593+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
68594+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
68595+ __u32 new_mode = mode;
68596+
68597+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
68598+
68599+ gr_log_learn(new_dentry, mnt, new_mode);
68600+ return new_mode;
68601+ }
68602+
68603+ return retval;
68604+}
68605+
68606+__u32
68607+gr_check_link(const struct dentry * new_dentry,
68608+ const struct dentry * parent_dentry,
68609+ const struct vfsmount * parent_mnt,
68610+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
68611+{
68612+ struct acl_object_label *obj;
68613+ __u32 oldmode, newmode;
68614+ __u32 needmode;
68615+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
68616+ GR_DELETE | GR_INHERIT;
68617+
68618+ if (unlikely(!(gr_status & GR_READY)))
68619+ return (GR_CREATE | GR_LINK);
68620+
68621+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
68622+ oldmode = obj->mode;
68623+
68624+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
68625+ newmode = obj->mode;
68626+
68627+ needmode = newmode & checkmodes;
68628+
68629+ // old name for hardlink must have at least the permissions of the new name
68630+ if ((oldmode & needmode) != needmode)
68631+ goto bad;
68632+
68633+ // if old name had restrictions/auditing, make sure the new name does as well
68634+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
68635+
68636+ // don't allow hardlinking of suid/sgid/fcapped files without permission
68637+ if (is_privileged_binary(old_dentry))
68638+ needmode |= GR_SETID;
68639+
68640+ if ((newmode & needmode) != needmode)
68641+ goto bad;
68642+
68643+ // enforce minimum permissions
68644+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
68645+ return newmode;
68646+bad:
68647+ needmode = oldmode;
68648+ if (is_privileged_binary(old_dentry))
68649+ needmode |= GR_SETID;
68650+
68651+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
68652+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
68653+ return (GR_CREATE | GR_LINK);
68654+ } else if (newmode & GR_SUPPRESS)
68655+ return GR_SUPPRESS;
68656+ else
68657+ return 0;
68658+}
68659+
68660+int
68661+gr_check_hidden_task(const struct task_struct *task)
68662+{
68663+ if (unlikely(!(gr_status & GR_READY)))
68664+ return 0;
68665+
68666+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
68667+ return 1;
68668+
68669+ return 0;
68670+}
68671+
68672+int
68673+gr_check_protected_task(const struct task_struct *task)
68674+{
68675+ if (unlikely(!(gr_status & GR_READY) || !task))
68676+ return 0;
68677+
68678+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
68679+ task->acl != current->acl)
68680+ return 1;
68681+
68682+ return 0;
68683+}
68684+
68685+int
68686+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
68687+{
68688+ struct task_struct *p;
68689+ int ret = 0;
68690+
68691+ if (unlikely(!(gr_status & GR_READY) || !pid))
68692+ return ret;
68693+
68694+ read_lock(&tasklist_lock);
68695+ do_each_pid_task(pid, type, p) {
68696+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
68697+ p->acl != current->acl) {
68698+ ret = 1;
68699+ goto out;
68700+ }
68701+ } while_each_pid_task(pid, type, p);
68702+out:
68703+ read_unlock(&tasklist_lock);
68704+
68705+ return ret;
68706+}
68707+
68708+void
68709+gr_copy_label(struct task_struct *tsk)
68710+{
68711+ struct task_struct *p = current;
68712+
68713+ tsk->inherited = p->inherited;
68714+ tsk->acl_sp_role = 0;
68715+ tsk->acl_role_id = p->acl_role_id;
68716+ tsk->acl = p->acl;
68717+ tsk->role = p->role;
68718+ tsk->signal->used_accept = 0;
68719+ tsk->signal->curr_ip = p->signal->curr_ip;
68720+ tsk->signal->saved_ip = p->signal->saved_ip;
68721+ if (p->exec_file)
68722+ get_file(p->exec_file);
68723+ tsk->exec_file = p->exec_file;
68724+ tsk->is_writable = p->is_writable;
68725+ if (unlikely(p->signal->used_accept)) {
68726+ p->signal->curr_ip = 0;
68727+ p->signal->saved_ip = 0;
68728+ }
68729+
68730+ return;
68731+}
68732+
68733+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
68734+
68735+int
68736+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
68737+{
68738+ unsigned int i;
68739+ __u16 num;
68740+ uid_t *uidlist;
68741+ uid_t curuid;
68742+ int realok = 0;
68743+ int effectiveok = 0;
68744+ int fsok = 0;
68745+ uid_t globalreal, globaleffective, globalfs;
68746+
68747+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
68748+ struct user_struct *user;
68749+
68750+ if (!uid_valid(real))
68751+ goto skipit;
68752+
68753+ /* find user based on global namespace */
68754+
68755+ globalreal = GR_GLOBAL_UID(real);
68756+
68757+ user = find_user(make_kuid(&init_user_ns, globalreal));
68758+ if (user == NULL)
68759+ goto skipit;
68760+
68761+ if (gr_process_kernel_setuid_ban(user)) {
68762+ /* for find_user */
68763+ free_uid(user);
68764+ return 1;
68765+ }
68766+
68767+ /* for find_user */
68768+ free_uid(user);
68769+
68770+skipit:
68771+#endif
68772+
68773+ if (unlikely(!(gr_status & GR_READY)))
68774+ return 0;
68775+
68776+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68777+ gr_log_learn_uid_change(real, effective, fs);
68778+
68779+ num = current->acl->user_trans_num;
68780+ uidlist = current->acl->user_transitions;
68781+
68782+ if (uidlist == NULL)
68783+ return 0;
68784+
68785+ if (!uid_valid(real)) {
68786+ realok = 1;
68787+ globalreal = (uid_t)-1;
68788+ } else {
68789+ globalreal = GR_GLOBAL_UID(real);
68790+ }
68791+ if (!uid_valid(effective)) {
68792+ effectiveok = 1;
68793+ globaleffective = (uid_t)-1;
68794+ } else {
68795+ globaleffective = GR_GLOBAL_UID(effective);
68796+ }
68797+ if (!uid_valid(fs)) {
68798+ fsok = 1;
68799+ globalfs = (uid_t)-1;
68800+ } else {
68801+ globalfs = GR_GLOBAL_UID(fs);
68802+ }
68803+
68804+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
68805+ for (i = 0; i < num; i++) {
68806+ curuid = uidlist[i];
68807+ if (globalreal == curuid)
68808+ realok = 1;
68809+ if (globaleffective == curuid)
68810+ effectiveok = 1;
68811+ if (globalfs == curuid)
68812+ fsok = 1;
68813+ }
68814+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
68815+ for (i = 0; i < num; i++) {
68816+ curuid = uidlist[i];
68817+ if (globalreal == curuid)
68818+ break;
68819+ if (globaleffective == curuid)
68820+ break;
68821+ if (globalfs == curuid)
68822+ break;
68823+ }
68824+ /* not in deny list */
68825+ if (i == num) {
68826+ realok = 1;
68827+ effectiveok = 1;
68828+ fsok = 1;
68829+ }
68830+ }
68831+
68832+ if (realok && effectiveok && fsok)
68833+ return 0;
68834+ else {
68835+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
68836+ return 1;
68837+ }
68838+}
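
user_transitions is interpreted either as a whitelist (GR_ID_ALLOW: each of the real, effective, and fs IDs must appear in the list) or as a blacklist (GR_ID_DENY: none of them may appear). A distilled sketch of the per-ID test, under hypothetical names:

#include <stdbool.h>
#include <stddef.h>

enum trans_type { ID_ALLOW, ID_DENY };

/* Under ALLOW the id must be listed; under DENY it must not be. */
static bool id_transition_ok(enum trans_type type, const unsigned int *list,
			     size_t n, unsigned int id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (list[i] == id)
			break;
	return (type == ID_ALLOW) ? (i < n) : (i == n);
}

int main(void)
{
	const unsigned int list[] = { 0, 33 };

	/* uid 33 is listed: allowed under ALLOW, rejected under DENY */
	return (id_transition_ok(ID_ALLOW, list, 2, 33) &&
		!id_transition_ok(ID_DENY, list, 2, 33)) ? 0 : 1;
}
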
68839+
68840+int
68841+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
68842+{
68843+ unsigned int i;
68844+ __u16 num;
68845+ gid_t *gidlist;
68846+ gid_t curgid;
68847+ int realok = 0;
68848+ int effectiveok = 0;
68849+ int fsok = 0;
68850+ gid_t globalreal, globaleffective, globalfs;
68851+
68852+ if (unlikely(!(gr_status & GR_READY)))
68853+ return 0;
68854+
68855+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68856+ gr_log_learn_gid_change(real, effective, fs);
68857+
68858+ num = current->acl->group_trans_num;
68859+ gidlist = current->acl->group_transitions;
68860+
68861+ if (gidlist == NULL)
68862+ return 0;
68863+
68864+ if (!gid_valid(real)) {
68865+ realok = 1;
68866+ globalreal = (gid_t)-1;
68867+ } else {
68868+ globalreal = GR_GLOBAL_GID(real);
68869+ }
68870+ if (!gid_valid(effective)) {
68871+ effectiveok = 1;
68872+ globaleffective = (gid_t)-1;
68873+ } else {
68874+ globaleffective = GR_GLOBAL_GID(effective);
68875+ }
68876+ if (!gid_valid(fs)) {
68877+ fsok = 1;
68878+ globalfs = (gid_t)-1;
68879+ } else {
68880+ globalfs = GR_GLOBAL_GID(fs);
68881+ }
68882+
68883+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
68884+ for (i = 0; i < num; i++) {
68885+ curgid = gidlist[i];
68886+ if (globalreal == curgid)
68887+ realok = 1;
68888+ if (globaleffective == curgid)
68889+ effectiveok = 1;
68890+ if (globalfs == curgid)
68891+ fsok = 1;
68892+ }
68893+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
68894+ for (i = 0; i < num; i++) {
68895+ curgid = gidlist[i];
68896+ if (globalreal == curgid)
68897+ break;
68898+ if (globaleffective == curgid)
68899+ break;
68900+ if (globalfs == curgid)
68901+ break;
68902+ }
68903+ /* not in deny list */
68904+ if (i == num) {
68905+ realok = 1;
68906+ effectiveok = 1;
68907+ fsok = 1;
68908+ }
68909+ }
68910+
68911+ if (realok && effectiveok && fsok)
68912+ return 0;
68913+ else {
68914+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
68915+ return 1;
68916+ }
68917+}
68918+
68919+extern int gr_acl_is_capable(const int cap);
68920+
68921+void
68922+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
68923+{
68924+ struct acl_role_label *role = task->role;
68925+ struct acl_subject_label *subj = NULL;
68926+ struct acl_object_label *obj;
68927+ struct file *filp;
68928+ uid_t uid;
68929+ gid_t gid;
68930+
68931+ if (unlikely(!(gr_status & GR_READY)))
68932+ return;
68933+
68934+ uid = GR_GLOBAL_UID(kuid);
68935+ gid = GR_GLOBAL_GID(kgid);
68936+
68937+ filp = task->exec_file;
68938+
68939+	/* kernel process: give it the kernel role */
68940+ if (unlikely(!filp)) {
68941+ task->role = running_polstate.kernel_role;
68942+ task->acl = running_polstate.kernel_role->root_label;
68943+ return;
68944+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
68945+ /* save the current ip at time of role lookup so that the proper
68946+ IP will be learned for role_allowed_ip */
68947+ task->signal->saved_ip = task->signal->curr_ip;
68948+ role = lookup_acl_role_label(task, uid, gid);
68949+ }
68950+
68951+ /* don't change the role if we're not a privileged process */
68952+ if (role && task->role != role &&
68953+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
68954+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
68955+ return;
68956+
68957+	/* perform the subject lookup in the (possibly new) role;
68958+	   the result is reused below in the case where role == task->role
68959+	   */
68960+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
68961+
68962+	/* if the uid/gid changed but we ended up in the same role
68963+	   and inheritance is in use, don't lose the inherited subject:
68964+	   if the current subject differs from what a normal lookup
68965+	   would yield, we arrived at it via inheritance, so keep
68966+	   that subject
68967+	*/
68968+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
68969+ (subj == task->acl)))
68970+ task->acl = subj;
68971+
68972+ /* leave task->inherited unaffected */
68973+
68974+ task->role = role;
68975+
68976+ task->is_writable = 0;
68977+
68978+ /* ignore additional mmap checks for processes that are writable
68979+ by the default ACL */
68980+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
68981+ if (unlikely(obj->mode & GR_WRITE))
68982+ task->is_writable = 1;
68983+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
68984+ if (unlikely(obj->mode & GR_WRITE))
68985+ task->is_writable = 1;
68986+
68987+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68988+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
68989+#endif
68990+
68991+ gr_set_proc_res(task);
68992+
68993+ return;
68994+}
68995+
68996+int
68997+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
68998+ const int unsafe_flags)
68999+{
69000+ struct task_struct *task = current;
69001+ struct acl_subject_label *newacl;
69002+ struct acl_object_label *obj;
69003+ __u32 retmode;
69004+
69005+ if (unlikely(!(gr_status & GR_READY)))
69006+ return 0;
69007+
69008+ newacl = chk_subj_label(dentry, mnt, task->role);
69009+
69010+	/* special handling for the case where an admin role ran
69011+	   strace -f -p <pid> and that pid then did an exec
69012+	*/
69013+ rcu_read_lock();
69014+ read_lock(&tasklist_lock);
69015+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
69016+ (task->parent->acl->mode & GR_POVERRIDE))) {
69017+ read_unlock(&tasklist_lock);
69018+ rcu_read_unlock();
69019+ goto skip_check;
69020+ }
69021+ read_unlock(&tasklist_lock);
69022+ rcu_read_unlock();
69023+
69024+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
69025+ !(task->role->roletype & GR_ROLE_GOD) &&
69026+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
69027+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
69028+ if (unsafe_flags & LSM_UNSAFE_SHARE)
69029+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
69030+ else
69031+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
69032+ return -EACCES;
69033+ }
69034+
69035+skip_check:
69036+
69037+ obj = chk_obj_label(dentry, mnt, task->acl);
69038+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
69039+
69040+ if (!(task->acl->mode & GR_INHERITLEARN) &&
69041+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
69042+ if (obj->nested)
69043+ task->acl = obj->nested;
69044+ else
69045+ task->acl = newacl;
69046+ task->inherited = 0;
69047+ } else {
69048+ task->inherited = 1;
69049+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
69050+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
69051+ }
69052+
69053+ task->is_writable = 0;
69054+
69055+ /* ignore additional mmap checks for processes that are writable
69056+ by the default ACL */
69057+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
69058+ if (unlikely(obj->mode & GR_WRITE))
69059+ task->is_writable = 1;
69060+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
69061+ if (unlikely(obj->mode & GR_WRITE))
69062+ task->is_writable = 1;
69063+
69064+ gr_set_proc_res(task);
69065+
69066+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
69067+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
69068+#endif
69069+ return 0;
69070+}
69071+
69072+/* always called with valid inodev ptr */
69073+static void
69074+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
69075+{
69076+ struct acl_object_label *matchpo;
69077+ struct acl_subject_label *matchps;
69078+ struct acl_subject_label *subj;
69079+ struct acl_role_label *role;
69080+ unsigned int x;
69081+
69082+ FOR_EACH_ROLE_START(role)
69083+ FOR_EACH_SUBJECT_START(role, subj, x)
69084+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
69085+ matchpo->mode |= GR_DELETED;
69086+ FOR_EACH_SUBJECT_END(subj,x)
69087+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
69088+ /* nested subjects aren't in the role's subj_hash table */
69089+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
69090+ matchpo->mode |= GR_DELETED;
69091+ FOR_EACH_NESTED_SUBJECT_END(subj)
69092+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
69093+ matchps->mode |= GR_DELETED;
69094+ FOR_EACH_ROLE_END(role)
69095+
69096+ inodev->nentry->deleted = 1;
69097+
69098+ return;
69099+}
69100+
69101+void
69102+gr_handle_delete(const ino_t ino, const dev_t dev)
69103+{
69104+ struct inodev_entry *inodev;
69105+
69106+ if (unlikely(!(gr_status & GR_READY)))
69107+ return;
69108+
69109+ write_lock(&gr_inode_lock);
69110+ inodev = lookup_inodev_entry(ino, dev);
69111+ if (inodev != NULL)
69112+ do_handle_delete(inodev, ino, dev);
69113+ write_unlock(&gr_inode_lock);
69114+
69115+ return;
69116+}
69117+
69118+static void
69119+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
69120+ const ino_t newinode, const dev_t newdevice,
69121+ struct acl_subject_label *subj)
69122+{
69123+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
69124+ struct acl_object_label *match;
69125+
69126+ match = subj->obj_hash[index];
69127+
69128+ while (match && (match->inode != oldinode ||
69129+ match->device != olddevice ||
69130+ !(match->mode & GR_DELETED)))
69131+ match = match->next;
69132+
69133+ if (match && (match->inode == oldinode)
69134+ && (match->device == olddevice)
69135+ && (match->mode & GR_DELETED)) {
69136+ if (match->prev == NULL) {
69137+ subj->obj_hash[index] = match->next;
69138+ if (match->next != NULL)
69139+ match->next->prev = NULL;
69140+ } else {
69141+ match->prev->next = match->next;
69142+ if (match->next != NULL)
69143+ match->next->prev = match->prev;
69144+ }
69145+ match->prev = NULL;
69146+ match->next = NULL;
69147+ match->inode = newinode;
69148+ match->device = newdevice;
69149+ match->mode &= ~GR_DELETED;
69150+
69151+ insert_acl_obj_label(match, subj);
69152+ }
69153+
69154+ return;
69155+}
69156+
69157+static void
69158+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
69159+ const ino_t newinode, const dev_t newdevice,
69160+ struct acl_role_label *role)
69161+{
69162+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
69163+ struct acl_subject_label *match;
69164+
69165+ match = role->subj_hash[index];
69166+
69167+ while (match && (match->inode != oldinode ||
69168+ match->device != olddevice ||
69169+ !(match->mode & GR_DELETED)))
69170+ match = match->next;
69171+
69172+ if (match && (match->inode == oldinode)
69173+ && (match->device == olddevice)
69174+ && (match->mode & GR_DELETED)) {
69175+ if (match->prev == NULL) {
69176+ role->subj_hash[index] = match->next;
69177+ if (match->next != NULL)
69178+ match->next->prev = NULL;
69179+ } else {
69180+ match->prev->next = match->next;
69181+ if (match->next != NULL)
69182+ match->next->prev = match->prev;
69183+ }
69184+ match->prev = NULL;
69185+ match->next = NULL;
69186+ match->inode = newinode;
69187+ match->device = newdevice;
69188+ match->mode &= ~GR_DELETED;
69189+
69190+ insert_acl_subj_label(match, role);
69191+ }
69192+
69193+ return;
69194+}
69195+
69196+static void
69197+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
69198+ const ino_t newinode, const dev_t newdevice)
69199+{
69200+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
69201+ struct inodev_entry *match;
69202+
69203+ match = running_polstate.inodev_set.i_hash[index];
69204+
69205+ while (match && (match->nentry->inode != oldinode ||
69206+ match->nentry->device != olddevice || !match->nentry->deleted))
69207+ match = match->next;
69208+
69209+ if (match && (match->nentry->inode == oldinode)
69210+ && (match->nentry->device == olddevice) &&
69211+ match->nentry->deleted) {
69212+ if (match->prev == NULL) {
69213+ running_polstate.inodev_set.i_hash[index] = match->next;
69214+ if (match->next != NULL)
69215+ match->next->prev = NULL;
69216+ } else {
69217+ match->prev->next = match->next;
69218+ if (match->next != NULL)
69219+ match->next->prev = match->prev;
69220+ }
69221+ match->prev = NULL;
69222+ match->next = NULL;
69223+ match->nentry->inode = newinode;
69224+ match->nentry->device = newdevice;
69225+ match->nentry->deleted = 0;
69226+
69227+ insert_inodev_entry(match);
69228+ }
69229+
69230+ return;
69231+}
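
update_acl_obj_label(), update_acl_subj_label(), and update_inodev_entry() all perform the same move: find the entry still carrying the deleted file's old (inode, device) key, unlink it from its bucket, rewrite the key to the newly created file's values, clear the deleted mark, and re-insert so it hashes into the correct bucket. A compact sketch of that rehash, with hypothetical types and hash function:

#include <stddef.h>

struct entry {
	struct entry *prev, *next;
	unsigned long ino, dev;
	int deleted;
};

static unsigned int bucket_of(unsigned long ino, unsigned long dev,
			      unsigned int size)
{
	return (unsigned int)((ino + dev) % size);
}

/* Unlink e from its current bucket, rekey it to the newly created
   file's (ino, dev), clear the deleted mark, and re-insert it. */
static void rehash_entry(struct entry **table, unsigned int size,
			 struct entry *e,
			 unsigned long new_ino, unsigned long new_dev)
{
	if (e->prev == NULL)
		table[bucket_of(e->ino, e->dev, size)] = e->next;
	else
		e->prev->next = e->next;
	if (e->next != NULL)
		e->next->prev = e->prev;

	e->ino = new_ino;
	e->dev = new_dev;
	e->deleted = 0;

	e->prev = NULL;
	e->next = table[bucket_of(new_ino, new_dev, size)];
	if (e->next != NULL)
		e->next->prev = e;
	table[bucket_of(new_ino, new_dev, size)] = e;
}

int main(void)
{
	struct entry e = { NULL, NULL, 10, 1, 1 };
	struct entry *table[4] = { NULL, NULL, NULL, NULL };

	table[bucket_of(10, 1, 4)] = &e;
	rehash_entry(table, 4, &e, 42, 1);
	return (table[bucket_of(42, 1, 4)] == &e && !e.deleted) ? 0 : 1;
}
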
69232+
69233+static void
69234+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
69235+{
69236+ struct acl_subject_label *subj;
69237+ struct acl_role_label *role;
69238+ unsigned int x;
69239+
69240+ FOR_EACH_ROLE_START(role)
69241+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
69242+
69243+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
69244+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
69245+ subj->inode = ino;
69246+ subj->device = dev;
69247+ }
69248+ /* nested subjects aren't in the role's subj_hash table */
69249+ update_acl_obj_label(matchn->inode, matchn->device,
69250+ ino, dev, subj);
69251+ FOR_EACH_NESTED_SUBJECT_END(subj)
69252+ FOR_EACH_SUBJECT_START(role, subj, x)
69253+ update_acl_obj_label(matchn->inode, matchn->device,
69254+ ino, dev, subj);
69255+ FOR_EACH_SUBJECT_END(subj,x)
69256+ FOR_EACH_ROLE_END(role)
69257+
69258+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
69259+
69260+ return;
69261+}
69262+
69263+static void
69264+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
69265+ const struct vfsmount *mnt)
69266+{
69267+ ino_t ino = dentry->d_inode->i_ino;
69268+ dev_t dev = __get_dev(dentry);
69269+
69270+ __do_handle_create(matchn, ino, dev);
69271+
69272+ return;
69273+}
69274+
69275+void
69276+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
69277+{
69278+ struct name_entry *matchn;
69279+
69280+ if (unlikely(!(gr_status & GR_READY)))
69281+ return;
69282+
69283+ preempt_disable();
69284+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
69285+
69286+ if (unlikely((unsigned long)matchn)) {
69287+ write_lock(&gr_inode_lock);
69288+ do_handle_create(matchn, dentry, mnt);
69289+ write_unlock(&gr_inode_lock);
69290+ }
69291+ preempt_enable();
69292+
69293+ return;
69294+}
69295+
69296+void
69297+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
69298+{
69299+ struct name_entry *matchn;
69300+
69301+ if (unlikely(!(gr_status & GR_READY)))
69302+ return;
69303+
69304+ preempt_disable();
69305+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
69306+
69307+ if (unlikely((unsigned long)matchn)) {
69308+ write_lock(&gr_inode_lock);
69309+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
69310+ write_unlock(&gr_inode_lock);
69311+ }
69312+ preempt_enable();
69313+
69314+ return;
69315+}
69316+
69317+void
69318+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69319+ struct dentry *old_dentry,
69320+ struct dentry *new_dentry,
69321+ struct vfsmount *mnt, const __u8 replace)
69322+{
69323+ struct name_entry *matchn;
69324+ struct inodev_entry *inodev;
69325+ struct inode *inode = new_dentry->d_inode;
69326+ ino_t old_ino = old_dentry->d_inode->i_ino;
69327+ dev_t old_dev = __get_dev(old_dentry);
69328+
69329+	/* vfs_rename swaps the name and parent link of old_dentry
69330+	   and new_dentry.
69331+	   at this point, old_dentry has the new name, parent link,
69332+	   and inode for the renamed file.
69333+	   if a file is being replaced by the rename, new_dentry has
69334+	   the inode and name for the replaced file.
69335+	*/
69336+
69337+ if (unlikely(!(gr_status & GR_READY)))
69338+ return;
69339+
69340+ preempt_disable();
69341+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
69342+
69343+ /* we wouldn't have to check d_inode if it weren't for
69344+ NFS silly-renaming
69345+ */
69346+
69347+ write_lock(&gr_inode_lock);
69348+ if (unlikely(replace && inode)) {
69349+ ino_t new_ino = inode->i_ino;
69350+ dev_t new_dev = __get_dev(new_dentry);
69351+
69352+ inodev = lookup_inodev_entry(new_ino, new_dev);
69353+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
69354+ do_handle_delete(inodev, new_ino, new_dev);
69355+ }
69356+
69357+ inodev = lookup_inodev_entry(old_ino, old_dev);
69358+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
69359+ do_handle_delete(inodev, old_ino, old_dev);
69360+
69361+ if (unlikely((unsigned long)matchn))
69362+ do_handle_create(matchn, old_dentry, mnt);
69363+
69364+ write_unlock(&gr_inode_lock);
69365+ preempt_enable();
69366+
69367+ return;
69368+}
69369+
69370+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
69371+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
69372+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
69373+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
69374+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
69375+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
69376+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
69377+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
69378+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
69379+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
69380+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
69381+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
69382+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
69383+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
69384+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
69385+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
69386+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
69387+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
69388+};
69389+
69390+void
69391+gr_learn_resource(const struct task_struct *task,
69392+ const int res, const unsigned long wanted, const int gt)
69393+{
69394+ struct acl_subject_label *acl;
69395+ const struct cred *cred;
69396+
69397+ if (unlikely((gr_status & GR_READY) &&
69398+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
69399+ goto skip_reslog;
69400+
69401+ gr_log_resource(task, res, wanted, gt);
69402+skip_reslog:
69403+
69404+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
69405+ return;
69406+
69407+ acl = task->acl;
69408+
69409+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
69410+ !(acl->resmask & (1U << (unsigned short) res))))
69411+ return;
69412+
69413+ if (wanted >= acl->res[res].rlim_cur) {
69414+ unsigned long res_add;
69415+
69416+ res_add = wanted + res_learn_bumps[res];
69417+
69418+ acl->res[res].rlim_cur = res_add;
69419+
69420+ if (wanted > acl->res[res].rlim_max)
69421+ acl->res[res].rlim_max = res_add;
69422+
69423+ /* only log the subject filename, since resource logging is supported for
69424+ single-subject learning only */
69425+ rcu_read_lock();
69426+ cred = __task_cred(task);
69427+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
69428+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
69429+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
69430+ "", (unsigned long) res, &task->signal->saved_ip);
69431+ rcu_read_unlock();
69432+ }
69433+
69434+ return;
69435+}
69436+EXPORT_SYMBOL_GPL(gr_learn_resource);
69437+#endif
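/*
 * Editor's sketch (not part of the patch): the learning update rule from
 * gr_learn_resource() applied to a toy limit.  Whenever a request meets or
 * exceeds the current soft limit, the limit is raised to the request plus a
 * per-resource slack, so repeated small overruns converge quickly.  The
 * constant 64 stands in for one of the GR_RLIM_*_BUMP values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long rlim_cur = 1024, rlim_max = 1024, bump = 64;
	unsigned long wanted = 1500;	/* observed requirement */

	if (wanted >= rlim_cur) {
		unsigned long res_add = wanted + bump;

		rlim_cur = res_add;
		if (wanted > rlim_max)
			rlim_max = res_add;
	}
	printf("learned limits: cur=%lu max=%lu\n", rlim_cur, rlim_max);
	return 0;
}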
69438+
69439+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
69440+void
69441+pax_set_initial_flags(struct linux_binprm *bprm)
69442+{
69443+ struct task_struct *task = current;
69444+ struct acl_subject_label *proc;
69445+ unsigned long flags;
69446+
69447+ if (unlikely(!(gr_status & GR_READY)))
69448+ return;
69449+
69450+ flags = pax_get_flags(task);
69451+
69452+ proc = task->acl;
69453+
69454+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
69455+ flags &= ~MF_PAX_PAGEEXEC;
69456+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
69457+ flags &= ~MF_PAX_SEGMEXEC;
69458+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
69459+ flags &= ~MF_PAX_RANDMMAP;
69460+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
69461+ flags &= ~MF_PAX_EMUTRAMP;
69462+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
69463+ flags &= ~MF_PAX_MPROTECT;
69464+
69465+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
69466+ flags |= MF_PAX_PAGEEXEC;
69467+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
69468+ flags |= MF_PAX_SEGMEXEC;
69469+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
69470+ flags |= MF_PAX_RANDMMAP;
69471+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
69472+ flags |= MF_PAX_EMUTRAMP;
69473+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
69474+ flags |= MF_PAX_MPROTECT;
69475+
69476+ pax_set_flags(task, flags);
69477+
69478+ return;
69479+}
69480+#endif
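/*
 * Editor's sketch (not part of the patch): pax_set_initial_flags() applies
 * the subject's DISABLE bits before its ENABLE bits, so a flag listed in
 * both ends up enabled.  The same ordering is shown here on toy masks.
 */
#include <stdio.h>

#define F_PAGEEXEC 0x1UL
#define F_MPROTECT 0x2UL

int main(void)
{
	unsigned long flags = F_PAGEEXEC;	/* inherited state */
	unsigned long disable = F_PAGEEXEC | F_MPROTECT;
	unsigned long enable = F_MPROTECT;

	flags &= ~disable;	/* clear everything the policy disables */
	flags |= enable;	/* then set what it enables; enable wins */
	printf("final flags: %#lx\n", flags);	/* prints 0x2 */
	return 0;
}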
69481+
69482+int
69483+gr_handle_proc_ptrace(struct task_struct *task)
69484+{
69485+ struct file *filp;
69486+ struct task_struct *tmp = task;
69487+ struct task_struct *curtemp = current;
69488+ __u32 retmode;
69489+
69490+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
69491+ if (unlikely(!(gr_status & GR_READY)))
69492+ return 0;
69493+#endif
69494+
69495+ read_lock(&tasklist_lock);
69496+ read_lock(&grsec_exec_file_lock);
69497+ filp = task->exec_file;
69498+
69499+ while (task_pid_nr(tmp) > 0) {
69500+ if (tmp == curtemp)
69501+ break;
69502+ tmp = tmp->real_parent;
69503+ }
69504+
69505+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
69506+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
69507+ read_unlock(&grsec_exec_file_lock);
69508+ read_unlock(&tasklist_lock);
69509+ return 1;
69510+ }
69511+
69512+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69513+ if (!(gr_status & GR_READY)) {
69514+ read_unlock(&grsec_exec_file_lock);
69515+ read_unlock(&tasklist_lock);
69516+ return 0;
69517+ }
69518+#endif
69519+
69520+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
69521+ read_unlock(&grsec_exec_file_lock);
69522+ read_unlock(&tasklist_lock);
69523+
69524+ if (retmode & GR_NOPTRACE)
69525+ return 1;
69526+
69527+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
69528+ && (current->acl != task->acl || (current->acl != current->role->root_label
69529+ && task_pid_nr(current) != task_pid_nr(task))))
69530+ return 1;
69531+
69532+ return 0;
69533+}
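/*
 * Editor's sketch (not part of the patch): a userspace analogue of the
 * real_parent walk in gr_handle_proc_ptrace().  It climbs the ppid chain
 * from a target pid and reports whether the caller is an ancestor.  The
 * /proc parsing (scan past the last ')') is the usual way to skip a comm
 * field that may itself contain spaces or parentheses.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static pid_t parent_of(pid_t pid)
{
	char path[64], buf[512], *p;
	pid_t ppid = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/stat", (int)pid);
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f))
		goto out;
	p = strrchr(buf, ')');		/* end of (comm) */
	if (p)
		sscanf(p + 1, " %*c %d", &ppid);
out:
	if (f)
		fclose(f);
	return ppid;
}

int main(int argc, char **argv)
{
	pid_t tmp = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

	while (tmp > 0) {		/* mirrors task_pid_nr(tmp) > 0 */
		if (tmp == getpid()) {
			puts("caller is an ancestor (or the task itself)");
			return 0;
		}
		tmp = parent_of(tmp);
	}
	puts("caller is not an ancestor");
	return 1;
}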
69534+
69535+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
69536+{
69537+ if (unlikely(!(gr_status & GR_READY)))
69538+ return;
69539+
69540+ if (!(current->role->roletype & GR_ROLE_GOD))
69541+ return;
69542+
69543+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
69544+ p->role->rolename, gr_task_roletype_to_char(p),
69545+ p->acl->filename);
69546+}
69547+
69548+int
69549+gr_handle_ptrace(struct task_struct *task, const long request)
69550+{
69551+ struct task_struct *tmp = task;
69552+ struct task_struct *curtemp = current;
69553+ __u32 retmode;
69554+
69555+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
69556+ if (unlikely(!(gr_status & GR_READY)))
69557+ return 0;
69558+#endif
69559+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69560+ read_lock(&tasklist_lock);
69561+ while (task_pid_nr(tmp) > 0) {
69562+ if (tmp == curtemp)
69563+ break;
69564+ tmp = tmp->real_parent;
69565+ }
69566+
69567+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
69568+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
69569+ read_unlock(&tasklist_lock);
69570+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
69571+ return 1;
69572+ }
69573+ read_unlock(&tasklist_lock);
69574+ }
69575+
69576+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
69577+ if (!(gr_status & GR_READY))
69578+ return 0;
69579+#endif
69580+
69581+ read_lock(&grsec_exec_file_lock);
69582+ if (unlikely(!task->exec_file)) {
69583+ read_unlock(&grsec_exec_file_lock);
69584+ return 0;
69585+ }
69586+
69587+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
69588+ read_unlock(&grsec_exec_file_lock);
69589+
69590+ if (retmode & GR_NOPTRACE) {
69591+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
69592+ return 1;
69593+ }
69594+
69595+ if (retmode & GR_PTRACERD) {
69596+ switch (request) {
69597+ case PTRACE_SEIZE:
69598+ case PTRACE_POKETEXT:
69599+ case PTRACE_POKEDATA:
69600+ case PTRACE_POKEUSR:
69601+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
69602+ case PTRACE_SETREGS:
69603+ case PTRACE_SETFPREGS:
69604+#endif
69605+#ifdef CONFIG_X86
69606+ case PTRACE_SETFPXREGS:
69607+#endif
69608+#ifdef CONFIG_ALTIVEC
69609+ case PTRACE_SETVRREGS:
69610+#endif
69611+ return 1;
69612+ default:
69613+ return 0;
69614+ }
69615+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
69616+ !(current->role->roletype & GR_ROLE_GOD) &&
69617+ (current->acl != task->acl)) {
69618+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
69619+ return 1;
69620+ }
69621+
69622+ return 0;
69623+}
69624+
69625+static int is_writable_mmap(const struct file *filp)
69626+{
69627+ struct task_struct *task = current;
69628+ struct acl_object_label *obj, *obj2;
69629+
69630+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
69631+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
69632+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
69633+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
69634+ task->role->root_label);
69635+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
69636+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
69637+ return 1;
69638+ }
69639+ }
69640+ return 0;
69641+}
69642+
69643+int
69644+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
69645+{
69646+ __u32 mode;
69647+
69648+ if (unlikely(!file || !(prot & PROT_EXEC)))
69649+ return 1;
69650+
69651+ if (is_writable_mmap(file))
69652+ return 0;
69653+
69654+ mode =
69655+ gr_search_file(file->f_path.dentry,
69656+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
69657+ file->f_path.mnt);
69658+
69659+ if (!gr_tpe_allow(file))
69660+ return 0;
69661+
69662+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
69663+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
69664+ return 0;
69665+ } else if (unlikely(!(mode & GR_EXEC))) {
69666+ return 0;
69667+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
69668+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
69669+ return 1;
69670+ }
69671+
69672+ return 1;
69673+}
69674+
69675+int
69676+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
69677+{
69678+ __u32 mode;
69679+
69680+ if (unlikely(!file || !(prot & PROT_EXEC)))
69681+ return 1;
69682+
69683+ if (is_writable_mmap(file))
69684+ return 0;
69685+
69686+ mode =
69687+ gr_search_file(file->f_path.dentry,
69688+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
69689+ file->f_path.mnt);
69690+
69691+ if (!gr_tpe_allow(file))
69692+ return 0;
69693+
69694+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
69695+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
69696+ return 0;
69697+ } else if (unlikely(!(mode & GR_EXEC))) {
69698+ return 0;
69699+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
69700+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
69701+ return 1;
69702+ }
69703+
69704+ return 1;
69705+}
69706+
69707+void
69708+gr_acl_handle_psacct(struct task_struct *task, const long code)
69709+{
69710+ unsigned long runtime, cputime;
69711+ cputime_t utime, stime;
69712+ unsigned int wday, cday;
69713+ __u8 whr, chr;
69714+ __u8 wmin, cmin;
69715+ __u8 wsec, csec;
69716+ struct timespec timeval;
69717+
69718+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
69719+ !(task->acl->mode & GR_PROCACCT)))
69720+ return;
69721+
69722+ do_posix_clock_monotonic_gettime(&timeval);
69723+ runtime = timeval.tv_sec - task->start_time.tv_sec;
69724+ wday = runtime / (60 * 60 * 24);
69725+ runtime -= wday * (60 * 60 * 24);
69726+ whr = runtime / (60 * 60);
69727+ runtime -= whr * (60 * 60);
69728+ wmin = runtime / 60;
69729+ runtime -= wmin * 60;
69730+ wsec = runtime;
69731+
69732+ task_cputime(task, &utime, &stime);
69733+ cputime = cputime_to_secs(utime + stime);
69734+ cday = cputime / (60 * 60 * 24);
69735+ cputime -= cday * (60 * 60 * 24);
69736+ chr = cputime / (60 * 60);
69737+ cputime -= chr * (60 * 60);
69738+ cmin = cputime / 60;
69739+ cputime -= cmin * 60;
69740+ csec = cputime;
69741+
69742+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
69743+
69744+ return;
69745+}
69746+
69747+#ifdef CONFIG_TASKSTATS
69748+int gr_is_taskstats_denied(int pid)
69749+{
69750+ struct task_struct *task;
69751+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69752+ const struct cred *cred;
69753+#endif
69754+ int ret = 0;
69755+
69756+ /* restrict taskstats viewing to un-chrooted root users
69757+ who have the 'view' subject flag if the RBAC system is enabled
69758+ */
69759+
69760+ rcu_read_lock();
69761+ read_lock(&tasklist_lock);
69762+ task = find_task_by_vpid(pid);
69763+ if (task) {
69764+#ifdef CONFIG_GRKERNSEC_CHROOT
69765+ if (proc_is_chrooted(task))
69766+ ret = -EACCES;
69767+#endif
69768+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69769+ cred = __task_cred(task);
69770+#ifdef CONFIG_GRKERNSEC_PROC_USER
69771+ if (gr_is_global_nonroot(cred->uid))
69772+ ret = -EACCES;
69773+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69774+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
69775+ ret = -EACCES;
69776+#endif
69777+#endif
69778+ if (gr_status & GR_READY) {
69779+ if (!(task->acl->mode & GR_VIEW))
69780+ ret = -EACCES;
69781+ }
69782+ } else
69783+ ret = -ENOENT;
69784+
69785+ read_unlock(&tasklist_lock);
69786+ rcu_read_unlock();
69787+
69788+ return ret;
69789+}
69790+#endif
69791+
69792+/* AUXV entries are filled via a descendant of search_binary_handler
69793+ after we've already applied the subject for the target
69794+*/
69795+int gr_acl_enable_at_secure(void)
69796+{
69797+ if (unlikely(!(gr_status & GR_READY)))
69798+ return 0;
69799+
69800+ if (current->acl->mode & GR_ATSECURE)
69801+ return 1;
69802+
69803+ return 0;
69804+}
69805+
69806+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
69807+{
69808+ struct task_struct *task = current;
69809+ struct dentry *dentry = file->f_path.dentry;
69810+ struct vfsmount *mnt = file->f_path.mnt;
69811+ struct acl_object_label *obj, *tmp;
69812+ struct acl_subject_label *subj;
69813+ unsigned int bufsize;
69814+ int is_not_root;
69815+ char *path;
69816+ dev_t dev = __get_dev(dentry);
69817+
69818+ if (unlikely(!(gr_status & GR_READY)))
69819+ return 1;
69820+
69821+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
69822+ return 1;
69823+
69824+ /* ignore Eric Biederman */
69825+ if (IS_PRIVATE(dentry->d_inode))
69826+ return 1;
69827+
69828+ subj = task->acl;
69829+ read_lock(&gr_inode_lock);
69830+ do {
69831+ obj = lookup_acl_obj_label(ino, dev, subj);
69832+ if (obj != NULL) {
69833+ read_unlock(&gr_inode_lock);
69834+ return (obj->mode & GR_FIND) ? 1 : 0;
69835+ }
69836+ } while ((subj = subj->parent_subject));
69837+ read_unlock(&gr_inode_lock);
69838+
69839+ /* this is purely an optimization, since we're looking for an object
69840+ for the directory we're doing a readdir on.
69841+ If it's possible for any globbed object to match the entry we're
69842+ filling into the directory, then the object we find here will be
69843+ an anchor point with attached globbed objects.
69844+ */
69845+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
69846+ if (obj->globbed == NULL)
69847+ return (obj->mode & GR_FIND) ? 1 : 0;
69848+
69849+ is_not_root = ((obj->filename[0] == '/') &&
69850+ (obj->filename[1] == '\0')) ? 0 : 1;
69851+ bufsize = PAGE_SIZE - namelen - is_not_root;
69852+
69853+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
69854+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
69855+ return 1;
69856+
69857+ preempt_disable();
69858+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69859+ bufsize);
69860+
69861+ bufsize = strlen(path);
69862+
69863+ /* if base is "/", don't append an additional slash */
69864+ if (is_not_root)
69865+ *(path + bufsize) = '/';
69866+ memcpy(path + bufsize + is_not_root, name, namelen);
69867+ *(path + bufsize + namelen + is_not_root) = '\0';
69868+
69869+ tmp = obj->globbed;
69870+ while (tmp) {
69871+ if (!glob_match(tmp->filename, path)) {
69872+ preempt_enable();
69873+ return (tmp->mode & GR_FIND) ? 1 : 0;
69874+ }
69875+ tmp = tmp->next;
69876+ }
69877+ preempt_enable();
69878+ return (obj->mode & GR_FIND) ? 1 : 0;
69879+}
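/*
 * Editor's sketch (not part of the patch): the single comparison
 * "(bufsize - 1) > (PAGE_SIZE - 1)" in gr_acl_handle_filldir() relies on
 * unsigned wraparound: bufsize == 0 wraps to a huge value and fails the
 * test, as does any bufsize > PAGE_SIZE, so one branch rejects both ends
 * of the range.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

static int in_range(unsigned int bufsize)
{
	return (bufsize - 1) <= (TOY_PAGE_SIZE - 1);	/* 1..TOY_PAGE_SIZE */
}

int main(void)
{
	assert(!in_range(0));			/* wraps, rejected */
	assert(in_range(1) && in_range(TOY_PAGE_SIZE));
	assert(!in_range(TOY_PAGE_SIZE + 1));
	puts("range check behaves as the comment says");
	return 0;
}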
69880+
69881+void gr_put_exec_file(struct task_struct *task)
69882+{
69883+ struct file *filp;
69884+
69885+ write_lock(&grsec_exec_file_lock);
69886+ filp = task->exec_file;
69887+ task->exec_file = NULL;
69888+ write_unlock(&grsec_exec_file_lock);
69889+
69890+ if (filp)
69891+ fput(filp);
69892+
69893+ return;
69894+}
69895+
69896+
69897+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
69898+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
69899+#endif
69900+#ifdef CONFIG_SECURITY
69901+EXPORT_SYMBOL_GPL(gr_check_user_change);
69902+EXPORT_SYMBOL_GPL(gr_check_group_change);
69903+#endif
69904+
69905diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
69906new file mode 100644
69907index 0000000..18ffbbd
69908--- /dev/null
69909+++ b/grsecurity/gracl_alloc.c
69910@@ -0,0 +1,105 @@
69911+#include <linux/kernel.h>
69912+#include <linux/mm.h>
69913+#include <linux/slab.h>
69914+#include <linux/vmalloc.h>
69915+#include <linux/gracl.h>
69916+#include <linux/grsecurity.h>
69917+
69918+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
69919+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
69920+
69921+static __inline__ int
69922+alloc_pop(void)
69923+{
69924+ if (current_alloc_state->alloc_stack_next == 1)
69925+ return 0;
69926+
69927+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
69928+
69929+ current_alloc_state->alloc_stack_next--;
69930+
69931+ return 1;
69932+}
69933+
69934+static __inline__ int
69935+alloc_push(void *buf)
69936+{
69937+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
69938+ return 1;
69939+
69940+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
69941+
69942+ current_alloc_state->alloc_stack_next++;
69943+
69944+ return 0;
69945+}
69946+
69947+void *
69948+acl_alloc(unsigned long len)
69949+{
69950+ void *ret = NULL;
69951+
69952+ if (!len || len > PAGE_SIZE)
69953+ goto out;
69954+
69955+ ret = kmalloc(len, GFP_KERNEL);
69956+
69957+ if (ret) {
69958+ if (alloc_push(ret)) {
69959+ kfree(ret);
69960+ ret = NULL;
69961+ }
69962+ }
69963+
69964+out:
69965+ return ret;
69966+}
69967+
69968+void *
69969+acl_alloc_num(unsigned long num, unsigned long len)
69970+{
69971+ if (!len || (num > (PAGE_SIZE / len)))
69972+ return NULL;
69973+
69974+ return acl_alloc(num * len);
69975+}
69976+
69977+void
69978+acl_free_all(void)
69979+{
69980+ if (!current_alloc_state->alloc_stack)
69981+ return;
69982+
69983+ while (alloc_pop()) ;
69984+
69985+ if (current_alloc_state->alloc_stack) {
69986+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
69987+ kfree(current_alloc_state->alloc_stack);
69988+ else
69989+ vfree(current_alloc_state->alloc_stack);
69990+ }
69991+
69992+ current_alloc_state->alloc_stack = NULL;
69993+ current_alloc_state->alloc_stack_size = 1;
69994+ current_alloc_state->alloc_stack_next = 1;
69995+
69996+ return;
69997+}
69998+
69999+int
70000+acl_alloc_stack_init(unsigned long size)
70001+{
70002+ if ((size * sizeof (void *)) <= PAGE_SIZE)
70003+ current_alloc_state->alloc_stack =
70004+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
70005+ else
70006+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
70007+
70008+ current_alloc_state->alloc_stack_size = size;
70009+ current_alloc_state->alloc_stack_next = 1;
70010+
70011+ if (!current_alloc_state->alloc_stack)
70012+ return 0;
70013+ else
70014+ return 1;
70015+}
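/*
 * Editor's sketch (not part of the patch): the gracl_alloc.c pattern in
 * miniature: every allocation is pushed onto one stack so a single
 * teardown call (acl_free_all() in the patch) can release everything,
 * which keeps the policy-loading error paths from leaking.  Userspace
 * malloc/free stand in for kmalloc/kfree.
 */
#include <stdio.h>
#include <stdlib.h>

#define STACK_SZ 64

static void *stack[STACK_SZ];
static size_t next;

static void *tracked_alloc(size_t len)
{
	void *p = next < STACK_SZ ? malloc(len) : NULL;

	if (p)
		stack[next++] = p;
	return p;
}

static void free_all(void)
{
	while (next)
		free(stack[--next]);
}

int main(void)
{
	char *a = tracked_alloc(16), *b = tracked_alloc(32);

	printf("allocated %p and %p\n", (void *)a, (void *)b);
	free_all();	/* one call releases every tracked buffer */
	return 0;
}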
70016diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
70017new file mode 100644
70018index 0000000..bdd51ea
70019--- /dev/null
70020+++ b/grsecurity/gracl_cap.c
70021@@ -0,0 +1,110 @@
70022+#include <linux/kernel.h>
70023+#include <linux/module.h>
70024+#include <linux/sched.h>
70025+#include <linux/gracl.h>
70026+#include <linux/grsecurity.h>
70027+#include <linux/grinternal.h>
70028+
70029+extern const char *captab_log[];
70030+extern int captab_log_entries;
70031+
70032+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
70033+{
70034+ struct acl_subject_label *curracl;
70035+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
70036+ kernel_cap_t cap_audit = __cap_empty_set;
70037+
70038+ if (!gr_acl_is_enabled())
70039+ return 1;
70040+
70041+ curracl = task->acl;
70042+
70043+ cap_drop = curracl->cap_lower;
70044+ cap_mask = curracl->cap_mask;
70045+ cap_audit = curracl->cap_invert_audit;
70046+
70047+ while ((curracl = curracl->parent_subject)) {
70048+ /* if the cap isn't specified in the current computed mask but is specified in the
70049+ current level subject, and is lowered in the current level subject, then add
70050+ it to the set of dropped capabilities;
70051+ otherwise, add the current level subject's mask to the current computed mask.
70052+ */
70053+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
70054+ cap_raise(cap_mask, cap);
70055+ if (cap_raised(curracl->cap_lower, cap))
70056+ cap_raise(cap_drop, cap);
70057+ if (cap_raised(curracl->cap_invert_audit, cap))
70058+ cap_raise(cap_audit, cap);
70059+ }
70060+ }
70061+
70062+ if (!cap_raised(cap_drop, cap)) {
70063+ if (cap_raised(cap_audit, cap))
70064+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
70065+ return 1;
70066+ }
70067+
70068+ curracl = task->acl;
70069+
70070+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
70071+ && cap_raised(cred->cap_effective, cap)) {
70072+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
70073+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
70074+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
70075+ gr_to_filename(task->exec_file->f_path.dentry,
70076+ task->exec_file->f_path.mnt) : curracl->filename,
70077+ curracl->filename, 0UL,
70078+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
70079+ return 1;
70080+ }
70081+
70082+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
70083+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
70084+
70085+ return 0;
70086+}
70087+
70088+int
70089+gr_acl_is_capable(const int cap)
70090+{
70091+ return gr_task_acl_is_capable(current, current_cred(), cap);
70092+}
70093+
70094+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
70095+{
70096+ struct acl_subject_label *curracl;
70097+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
70098+
70099+ if (!gr_acl_is_enabled())
70100+ return 1;
70101+
70102+ curracl = task->acl;
70103+
70104+ cap_drop = curracl->cap_lower;
70105+ cap_mask = curracl->cap_mask;
70106+
70107+ while ((curracl = curracl->parent_subject)) {
70108+ /* if the cap isn't specified in the current computed mask but is specified in the
70109+ current level subject, and is lowered in the current level subject, then add
70110+ it to the set of dropped capabilities;
70111+ otherwise, add the current level subject's mask to the current computed mask.
70112+ */
70113+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
70114+ cap_raise(cap_mask, cap);
70115+ if (cap_raised(curracl->cap_lower, cap))
70116+ cap_raise(cap_drop, cap);
70117+ }
70118+ }
70119+
70120+ if (!cap_raised(cap_drop, cap))
70121+ return 1;
70122+
70123+ return 0;
70124+}
70125+
70126+int
70127+gr_acl_is_capable_nolog(const int cap)
70128+{
70129+ return gr_task_acl_is_capable_nolog(current, cap);
70130+}
70131+
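/*
 * Editor's sketch (not part of the patch): the parent_subject walk of
 * gr_task_acl_is_capable() on toy bitmasks.  A subject that does not
 * mention a capability in its mask defers to the nearest ancestor that
 * does, and only that ancestor's lower/drop decision is honored.
 */
#include <stdio.h>

struct subj {
	unsigned long cap_mask;		/* caps this level specifies */
	unsigned long cap_lower;	/* caps this level drops */
	struct subj *parent;
};

static int cap_dropped(const struct subj *s, unsigned long cap_bit)
{
	unsigned long mask = s->cap_mask;
	unsigned long drop = s->cap_lower;

	while ((s = s->parent)) {
		if (!(mask & cap_bit) && (s->cap_mask & cap_bit)) {
			mask |= cap_bit;
			if (s->cap_lower & cap_bit)
				drop |= cap_bit;
		}
	}
	return !!(drop & cap_bit);
}

int main(void)
{
	struct subj root = { .cap_mask = 0x1, .cap_lower = 0x1, .parent = NULL };
	struct subj child = { .cap_mask = 0x0, .cap_lower = 0x0, .parent = &root };

	/* child is silent about bit 0, so root's drop applies: prints 1 */
	printf("cap 0 dropped for child: %d\n", cap_dropped(&child, 0x1));
	return 0;
}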
70132diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
70133new file mode 100644
70134index 0000000..ca25605
70135--- /dev/null
70136+++ b/grsecurity/gracl_compat.c
70137@@ -0,0 +1,270 @@
70138+#include <linux/kernel.h>
70139+#include <linux/gracl.h>
70140+#include <linux/compat.h>
70141+#include <linux/gracl_compat.h>
70142+
70143+#include <asm/uaccess.h>
70144+
70145+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
70146+{
70147+ struct gr_arg_wrapper_compat uwrapcompat;
70148+
70149+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
70150+ return -EFAULT;
70151+
70152+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
70153+ (uwrapcompat.version != 0x2901)) ||
70154+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
70155+ return -EINVAL;
70156+
70157+ uwrap->arg = compat_ptr(uwrapcompat.arg);
70158+ uwrap->version = uwrapcompat.version;
70159+ uwrap->size = sizeof(struct gr_arg);
70160+
70161+ return 0;
70162+}
70163+
70164+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
70165+{
70166+ struct gr_arg_compat argcompat;
70167+
70168+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
70169+ return -EFAULT;
70170+
70171+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
70172+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
70173+ arg->role_db.num_roles = argcompat.role_db.num_roles;
70174+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
70175+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
70176+ arg->role_db.num_objects = argcompat.role_db.num_objects;
70177+
70178+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
70179+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
70180+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
70181+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
70182+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
70183+ arg->segv_device = argcompat.segv_device;
70184+ arg->segv_inode = argcompat.segv_inode;
70185+ arg->segv_uid = argcompat.segv_uid;
70186+ arg->num_sprole_pws = argcompat.num_sprole_pws;
70187+ arg->mode = argcompat.mode;
70188+
70189+ return 0;
70190+}
70191+
70192+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
70193+{
70194+ struct acl_object_label_compat objcompat;
70195+
70196+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
70197+ return -EFAULT;
70198+
70199+ obj->filename = compat_ptr(objcompat.filename);
70200+ obj->inode = objcompat.inode;
70201+ obj->device = objcompat.device;
70202+ obj->mode = objcompat.mode;
70203+
70204+ obj->nested = compat_ptr(objcompat.nested);
70205+ obj->globbed = compat_ptr(objcompat.globbed);
70206+
70207+ obj->prev = compat_ptr(objcompat.prev);
70208+ obj->next = compat_ptr(objcompat.next);
70209+
70210+ return 0;
70211+}
70212+
70213+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
70214+{
70215+ unsigned int i;
70216+ struct acl_subject_label_compat subjcompat;
70217+
70218+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
70219+ return -EFAULT;
70220+
70221+ subj->filename = compat_ptr(subjcompat.filename);
70222+ subj->inode = subjcompat.inode;
70223+ subj->device = subjcompat.device;
70224+ subj->mode = subjcompat.mode;
70225+ subj->cap_mask = subjcompat.cap_mask;
70226+ subj->cap_lower = subjcompat.cap_lower;
70227+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
70228+
70229+ for (i = 0; i < GR_NLIMITS; i++) {
70230+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
70231+ subj->res[i].rlim_cur = RLIM_INFINITY;
70232+ else
70233+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
70234+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
70235+ subj->res[i].rlim_max = RLIM_INFINITY;
70236+ else
70237+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
70238+ }
70239+ subj->resmask = subjcompat.resmask;
70240+
70241+ subj->user_trans_type = subjcompat.user_trans_type;
70242+ subj->group_trans_type = subjcompat.group_trans_type;
70243+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
70244+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
70245+ subj->user_trans_num = subjcompat.user_trans_num;
70246+ subj->group_trans_num = subjcompat.group_trans_num;
70247+
70248+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
70249+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
70250+ subj->ip_type = subjcompat.ip_type;
70251+ subj->ips = compat_ptr(subjcompat.ips);
70252+ subj->ip_num = subjcompat.ip_num;
70253+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
70254+
70255+ subj->crashes = subjcompat.crashes;
70256+ subj->expires = subjcompat.expires;
70257+
70258+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
70259+ subj->hash = compat_ptr(subjcompat.hash);
70260+ subj->prev = compat_ptr(subjcompat.prev);
70261+ subj->next = compat_ptr(subjcompat.next);
70262+
70263+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
70264+ subj->obj_hash_size = subjcompat.obj_hash_size;
70265+ subj->pax_flags = subjcompat.pax_flags;
70266+
70267+ return 0;
70268+}
70269+
70270+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
70271+{
70272+ struct acl_role_label_compat rolecompat;
70273+
70274+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
70275+ return -EFAULT;
70276+
70277+ role->rolename = compat_ptr(rolecompat.rolename);
70278+ role->uidgid = rolecompat.uidgid;
70279+ role->roletype = rolecompat.roletype;
70280+
70281+ role->auth_attempts = rolecompat.auth_attempts;
70282+ role->expires = rolecompat.expires;
70283+
70284+ role->root_label = compat_ptr(rolecompat.root_label);
70285+ role->hash = compat_ptr(rolecompat.hash);
70286+
70287+ role->prev = compat_ptr(rolecompat.prev);
70288+ role->next = compat_ptr(rolecompat.next);
70289+
70290+ role->transitions = compat_ptr(rolecompat.transitions);
70291+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
70292+ role->domain_children = compat_ptr(rolecompat.domain_children);
70293+ role->domain_child_num = rolecompat.domain_child_num;
70294+
70295+ role->umask = rolecompat.umask;
70296+
70297+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
70298+ role->subj_hash_size = rolecompat.subj_hash_size;
70299+
70300+ return 0;
70301+}
70302+
70303+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
70304+{
70305+ struct role_allowed_ip_compat roleip_compat;
70306+
70307+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
70308+ return -EFAULT;
70309+
70310+ roleip->addr = roleip_compat.addr;
70311+ roleip->netmask = roleip_compat.netmask;
70312+
70313+ roleip->prev = compat_ptr(roleip_compat.prev);
70314+ roleip->next = compat_ptr(roleip_compat.next);
70315+
70316+ return 0;
70317+}
70318+
70319+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
70320+{
70321+ struct role_transition_compat trans_compat;
70322+
70323+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
70324+ return -EFAULT;
70325+
70326+ trans->rolename = compat_ptr(trans_compat.rolename);
70327+
70328+ trans->prev = compat_ptr(trans_compat.prev);
70329+ trans->next = compat_ptr(trans_compat.next);
70330+
70331+ return 0;
70332+
70333+}
70334+
70335+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
70336+{
70337+ struct gr_hash_struct_compat hash_compat;
70338+
70339+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
70340+ return -EFAULT;
70341+
70342+ hash->table = compat_ptr(hash_compat.table);
70343+ hash->nametable = compat_ptr(hash_compat.nametable);
70344+ hash->first = compat_ptr(hash_compat.first);
70345+
70346+ hash->table_size = hash_compat.table_size;
70347+ hash->used_size = hash_compat.used_size;
70348+
70349+ hash->type = hash_compat.type;
70350+
70351+ return 0;
70352+}
70353+
70354+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
70355+{
70356+ compat_uptr_t ptrcompat;
70357+
70358+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
70359+ return -EFAULT;
70360+
70361+ *(void **)ptr = compat_ptr(ptrcompat);
70362+
70363+ return 0;
70364+}
70365+
70366+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
70367+{
70368+ struct acl_ip_label_compat ip_compat;
70369+
70370+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
70371+ return -EFAULT;
70372+
70373+ ip->iface = compat_ptr(ip_compat.iface);
70374+ ip->addr = ip_compat.addr;
70375+ ip->netmask = ip_compat.netmask;
70376+ ip->low = ip_compat.low;
70377+ ip->high = ip_compat.high;
70378+ ip->mode = ip_compat.mode;
70379+ ip->type = ip_compat.type;
70380+
70381+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
70382+
70383+ ip->prev = compat_ptr(ip_compat.prev);
70384+ ip->next = compat_ptr(ip_compat.next);
70385+
70386+ return 0;
70387+}
70388+
70389+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
70390+{
70391+ struct sprole_pw_compat pw_compat;
70392+
70393+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
70394+ return -EFAULT;
70395+
70396+ pw->rolename = compat_ptr(pw_compat.rolename);
70397+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
70398+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
70399+
70400+ return 0;
70401+}
70402+
70403+size_t get_gr_arg_wrapper_size_compat(void)
70404+{
70405+ return sizeof(struct gr_arg_wrapper_compat);
70406+}
70407+
70408diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
70409new file mode 100644
70410index 0000000..a89b1f4
70411--- /dev/null
70412+++ b/grsecurity/gracl_fs.c
70413@@ -0,0 +1,437 @@
70414+#include <linux/kernel.h>
70415+#include <linux/sched.h>
70416+#include <linux/types.h>
70417+#include <linux/fs.h>
70418+#include <linux/file.h>
70419+#include <linux/stat.h>
70420+#include <linux/grsecurity.h>
70421+#include <linux/grinternal.h>
70422+#include <linux/gracl.h>
70423+
70424+umode_t
70425+gr_acl_umask(void)
70426+{
70427+ if (unlikely(!gr_acl_is_enabled()))
70428+ return 0;
70429+
70430+ return current->role->umask;
70431+}
70432+
70433+__u32
70434+gr_acl_handle_hidden_file(const struct dentry * dentry,
70435+ const struct vfsmount * mnt)
70436+{
70437+ __u32 mode;
70438+
70439+ if (unlikely(d_is_negative(dentry)))
70440+ return GR_FIND;
70441+
70442+ mode =
70443+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
70444+
70445+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
70446+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
70447+ return mode;
70448+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
70449+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
70450+ return 0;
70451+ } else if (unlikely(!(mode & GR_FIND)))
70452+ return 0;
70453+
70454+ return GR_FIND;
70455+}
70456+
70457+__u32
70458+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
70459+ int acc_mode)
70460+{
70461+ __u32 reqmode = GR_FIND;
70462+ __u32 mode;
70463+
70464+ if (unlikely(d_is_negative(dentry)))
70465+ return reqmode;
70466+
70467+ if (acc_mode & MAY_APPEND)
70468+ reqmode |= GR_APPEND;
70469+ else if (acc_mode & MAY_WRITE)
70470+ reqmode |= GR_WRITE;
70471+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
70472+ reqmode |= GR_READ;
70473+
70474+ mode =
70475+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
70476+ mnt);
70477+
70478+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
70479+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
70480+ reqmode & GR_READ ? " reading" : "",
70481+ reqmode & GR_WRITE ? " writing" : reqmode &
70482+ GR_APPEND ? " appending" : "");
70483+ return reqmode;
70484+ } else
70485+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
70486+ {
70487+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
70488+ reqmode & GR_READ ? " reading" : "",
70489+ reqmode & GR_WRITE ? " writing" : reqmode &
70490+ GR_APPEND ? " appending" : "");
70491+ return 0;
70492+ } else if (unlikely((mode & reqmode) != reqmode))
70493+ return 0;
70494+
70495+ return reqmode;
70496+}
70497+
70498+__u32
70499+gr_acl_handle_creat(const struct dentry * dentry,
70500+ const struct dentry * p_dentry,
70501+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
70502+ const int imode)
70503+{
70504+ __u32 reqmode = GR_WRITE | GR_CREATE;
70505+ __u32 mode;
70506+
70507+ if (acc_mode & MAY_APPEND)
70508+ reqmode |= GR_APPEND;
70509+ // if a directory was required or the directory already exists, then
70510+ // don't count this open as a read
70511+ if ((acc_mode & MAY_READ) &&
70512+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
70513+ reqmode |= GR_READ;
70514+ if ((open_flags & O_CREAT) &&
70515+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
70516+ reqmode |= GR_SETID;
70517+
70518+ mode =
70519+ gr_check_create(dentry, p_dentry, p_mnt,
70520+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
70521+
70522+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
70523+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
70524+ reqmode & GR_READ ? " reading" : "",
70525+ reqmode & GR_WRITE ? " writing" : reqmode &
70526+ GR_APPEND ? " appending" : "");
70527+ return reqmode;
70528+ } else
70529+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
70530+ {
70531+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
70532+ reqmode & GR_READ ? " reading" : "",
70533+ reqmode & GR_WRITE ? " writing" : reqmode &
70534+ GR_APPEND ? " appending" : "");
70535+ return 0;
70536+ } else if (unlikely((mode & reqmode) != reqmode))
70537+ return 0;
70538+
70539+ return reqmode;
70540+}
70541+
70542+__u32
70543+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
70544+ const int fmode)
70545+{
70546+ __u32 mode, reqmode = GR_FIND;
70547+
70548+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
70549+ reqmode |= GR_EXEC;
70550+ if (fmode & S_IWOTH)
70551+ reqmode |= GR_WRITE;
70552+ if (fmode & S_IROTH)
70553+ reqmode |= GR_READ;
70554+
70555+ mode =
70556+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
70557+ mnt);
70558+
70559+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
70560+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
70561+ reqmode & GR_READ ? " reading" : "",
70562+ reqmode & GR_WRITE ? " writing" : "",
70563+ reqmode & GR_EXEC ? " executing" : "");
70564+ return reqmode;
70565+ } else
70566+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
70567+ {
70568+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
70569+ reqmode & GR_READ ? " reading" : "",
70570+ reqmode & GR_WRITE ? " writing" : "",
70571+ reqmode & GR_EXEC ? " executing" : "");
70572+ return 0;
70573+ } else if (unlikely((mode & reqmode) != reqmode))
70574+ return 0;
70575+
70576+ return reqmode;
70577+}
70578+
70579+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
70580+{
70581+ __u32 mode;
70582+
70583+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
70584+
70585+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
70586+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
70587+ return mode;
70588+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
70589+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
70590+ return 0;
70591+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
70592+ return 0;
70593+
70594+ return (reqmode);
70595+}
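/*
 * Editor's sketch (not part of the patch): the three-way triage used by
 * generic_fs_handler() and its callers, on plain integers.  A request is
 * (1) granted, with an audit log when an audit bit fired, (2) denied and
 * logged when required bits are missing and suppression is off, or
 * (3) denied silently when the suppress bit is set.
 */
#include <stdio.h>

#define REQ_READ  0x1u
#define REQ_WRITE 0x2u
#define AUDIT     0x4u
#define SUPPRESS  0x8u

static unsigned int triage(unsigned int mode, unsigned int reqmode)
{
	if ((mode & reqmode) == reqmode) {
		if (mode & AUDIT)
			puts("grant + audit log");
		return reqmode;
	}
	if (!(mode & SUPPRESS))
		puts("deny + denial log");
	else
		puts("deny, silently");
	return 0;
}

int main(void)
{
	triage(REQ_READ | REQ_WRITE | AUDIT, REQ_READ | REQ_WRITE);
	triage(REQ_READ, REQ_READ | REQ_WRITE);
	triage(REQ_READ | SUPPRESS, REQ_READ | REQ_WRITE);
	return 0;
}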
70596+
70597+__u32
70598+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
70599+{
70600+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
70601+}
70602+
70603+__u32
70604+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
70605+{
70606+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
70607+}
70608+
70609+__u32
70610+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
70611+{
70612+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
70613+}
70614+
70615+__u32
70616+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
70617+{
70618+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
70619+}
70620+
70621+__u32
70622+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
70623+ umode_t *modeptr)
70624+{
70625+ umode_t mode;
70626+
70627+ *modeptr &= ~gr_acl_umask();
70628+ mode = *modeptr;
70629+
70630+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
70631+ return 1;
70632+
70633+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
70634+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
70635+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
70636+ GR_CHMOD_ACL_MSG);
70637+ } else {
70638+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
70639+ }
70640+}
70641+
70642+__u32
70643+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
70644+{
70645+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
70646+}
70647+
70648+__u32
70649+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
70650+{
70651+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
70652+}
70653+
70654+__u32
70655+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
70656+{
70657+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
70658+}
70659+
70660+__u32
70661+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
70662+{
70663+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
70664+}
70665+
70666+__u32
70667+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
70668+{
70669+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
70670+ GR_UNIXCONNECT_ACL_MSG);
70671+}
70672+
70673+/* hardlinks require at minimum create and link permission;
70674+ any additional privilege required is based on the
70675+ privilege of the file being linked to
70676+*/
70677+__u32
70678+gr_acl_handle_link(const struct dentry * new_dentry,
70679+ const struct dentry * parent_dentry,
70680+ const struct vfsmount * parent_mnt,
70681+ const struct dentry * old_dentry,
70682+ const struct vfsmount * old_mnt, const struct filename *to)
70683+{
70684+ __u32 mode;
70685+ __u32 needmode = GR_CREATE | GR_LINK;
70686+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
70687+
70688+ mode =
70689+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
70690+ old_mnt);
70691+
70692+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
70693+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
70694+ return mode;
70695+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
70696+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
70697+ return 0;
70698+ } else if (unlikely((mode & needmode) != needmode))
70699+ return 0;
70700+
70701+ return 1;
70702+}
70703+
70704+__u32
70705+gr_acl_handle_symlink(const struct dentry * new_dentry,
70706+ const struct dentry * parent_dentry,
70707+ const struct vfsmount * parent_mnt, const struct filename *from)
70708+{
70709+ __u32 needmode = GR_WRITE | GR_CREATE;
70710+ __u32 mode;
70711+
70712+ mode =
70713+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
70714+ GR_CREATE | GR_AUDIT_CREATE |
70715+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
70716+
70717+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
70718+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
70719+ return mode;
70720+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
70721+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
70722+ return 0;
70723+ } else if (unlikely((mode & needmode) != needmode))
70724+ return 0;
70725+
70726+ return (GR_WRITE | GR_CREATE);
70727+}
70728+
70729+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
70730+{
70731+ __u32 mode;
70732+
70733+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
70734+
70735+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
70736+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
70737+ return mode;
70738+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
70739+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
70740+ return 0;
70741+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
70742+ return 0;
70743+
70744+ return (reqmode);
70745+}
70746+
70747+__u32
70748+gr_acl_handle_mknod(const struct dentry * new_dentry,
70749+ const struct dentry * parent_dentry,
70750+ const struct vfsmount * parent_mnt,
70751+ const int mode)
70752+{
70753+ __u32 reqmode = GR_WRITE | GR_CREATE;
70754+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
70755+ reqmode |= GR_SETID;
70756+
70757+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
70758+ reqmode, GR_MKNOD_ACL_MSG);
70759+}
70760+
70761+__u32
70762+gr_acl_handle_mkdir(const struct dentry *new_dentry,
70763+ const struct dentry *parent_dentry,
70764+ const struct vfsmount *parent_mnt)
70765+{
70766+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
70767+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
70768+}
70769+
70770+#define RENAME_CHECK_SUCCESS(old, new) \
70771+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
70772+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
70773+
70774+int
70775+gr_acl_handle_rename(struct dentry *new_dentry,
70776+ struct dentry *parent_dentry,
70777+ const struct vfsmount *parent_mnt,
70778+ struct dentry *old_dentry,
70779+ struct inode *old_parent_inode,
70780+ struct vfsmount *old_mnt, const struct filename *newname)
70781+{
70782+ __u32 comp1, comp2;
70783+ int error = 0;
70784+
70785+ if (unlikely(!gr_acl_is_enabled()))
70786+ return 0;
70787+
70788+ if (d_is_negative(new_dentry)) {
70789+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
70790+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
70791+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
70792+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
70793+ GR_DELETE | GR_AUDIT_DELETE |
70794+ GR_AUDIT_READ | GR_AUDIT_WRITE |
70795+ GR_SUPPRESS, old_mnt);
70796+ } else {
70797+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
70798+ GR_CREATE | GR_DELETE |
70799+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
70800+ GR_AUDIT_READ | GR_AUDIT_WRITE |
70801+ GR_SUPPRESS, parent_mnt);
70802+ comp2 =
70803+ gr_search_file(old_dentry,
70804+ GR_READ | GR_WRITE | GR_AUDIT_READ |
70805+ GR_DELETE | GR_AUDIT_DELETE |
70806+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
70807+ }
70808+
70809+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
70810+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
70811+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
70812+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
70813+ && !(comp2 & GR_SUPPRESS)) {
70814+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
70815+ error = -EACCES;
70816+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
70817+ error = -EACCES;
70818+
70819+ return error;
70820+}
70821+
70822+void
70823+gr_acl_handle_exit(void)
70824+{
70825+ u16 id;
70826+ char *rolename;
70827+
70828+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
70829+ !(current->role->roletype & GR_ROLE_PERSIST))) {
70830+ id = current->acl_role_id;
70831+ rolename = current->role->rolename;
70832+ gr_set_acls(1);
70833+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
70834+ }
70835+
70836+ gr_put_exec_file(current);
70837+ return;
70838+}
70839+
70840+int
70841+gr_acl_handle_procpidmem(const struct task_struct *task)
70842+{
70843+ if (unlikely(!gr_acl_is_enabled()))
70844+ return 0;
70845+
70846+ if (task != current && task->acl->mode & GR_PROTPROCFD)
70847+ return -EACCES;
70848+
70849+ return 0;
70850+}
70851diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
70852new file mode 100644
70853index 0000000..f056b81
70854--- /dev/null
70855+++ b/grsecurity/gracl_ip.c
70856@@ -0,0 +1,386 @@
70857+#include <linux/kernel.h>
70858+#include <asm/uaccess.h>
70859+#include <asm/errno.h>
70860+#include <net/sock.h>
70861+#include <linux/file.h>
70862+#include <linux/fs.h>
70863+#include <linux/net.h>
70864+#include <linux/in.h>
70865+#include <linux/skbuff.h>
70866+#include <linux/ip.h>
70867+#include <linux/udp.h>
70868+#include <linux/types.h>
70869+#include <linux/sched.h>
70870+#include <linux/netdevice.h>
70871+#include <linux/inetdevice.h>
70872+#include <linux/gracl.h>
70873+#include <linux/grsecurity.h>
70874+#include <linux/grinternal.h>
70875+
70876+#define GR_BIND 0x01
70877+#define GR_CONNECT 0x02
70878+#define GR_INVERT 0x04
70879+#define GR_BINDOVERRIDE 0x08
70880+#define GR_CONNECTOVERRIDE 0x10
70881+#define GR_SOCK_FAMILY 0x20
70882+
70883+static const char * gr_protocols[IPPROTO_MAX] = {
70884+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
70885+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
70886+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
70887+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
70888+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
70889+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
70890+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
70891+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
70892+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
70893+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
70894+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
70895+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
70896+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
70897+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
70898+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
70899+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
70900+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
70901+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
70902+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
70903+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
70904+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
70905+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
70906+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
70907+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
70908+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
70909+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
70910+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
70911+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
70912+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
70913+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
70914+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
70915+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
70916+ };
70917+
70918+static const char * gr_socktypes[SOCK_MAX] = {
70919+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
70920+ "unknown:7", "unknown:8", "unknown:9", "packet"
70921+ };
70922+
70923+static const char * gr_sockfamilies[AF_MAX+1] = {
70924+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
70925+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
70926+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
70927+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
70928+ };
70929+
70930+const char *
70931+gr_proto_to_name(unsigned char proto)
70932+{
70933+ return gr_protocols[proto];
70934+}
70935+
70936+const char *
70937+gr_socktype_to_name(unsigned char type)
70938+{
70939+ return gr_socktypes[type];
70940+}
70941+
70942+const char *
70943+gr_sockfamily_to_name(unsigned char family)
70944+{
70945+ return gr_sockfamilies[family];
70946+}
70947+
70948+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
70949+
70950+int
70951+gr_search_socket(const int domain, const int type, const int protocol)
70952+{
70953+ struct acl_subject_label *curr;
70954+ const struct cred *cred = current_cred();
70955+
70956+ if (unlikely(!gr_acl_is_enabled()))
70957+ goto exit;
70958+
70959+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
70960+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
70961+ goto exit; // let the kernel handle it
70962+
70963+ curr = current->acl;
70964+
70965+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
70966+ /* the family is allowed; if this is PF_INET, allow it only if
70967+ the extra sock type/protocol checks pass */
70968+ if (domain == PF_INET)
70969+ goto inet_check;
70970+ goto exit;
70971+ } else {
70972+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70973+ __u32 fakeip = 0;
70974+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70975+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
70976+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
70977+ gr_to_filename(current->exec_file->f_path.dentry,
70978+ current->exec_file->f_path.mnt) :
70979+ curr->filename, curr->filename,
70980+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
70981+ &current->signal->saved_ip);
70982+ goto exit;
70983+ }
70984+ goto exit_fail;
70985+ }
70986+
70987+inet_check:
70988+ /* the rest of this checking is for IPv4 only */
70989+ if (!curr->ips)
70990+ goto exit;
70991+
70992+ if ((curr->ip_type & (1U << type)) &&
70993+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
70994+ goto exit;
70995+
70996+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70997+ /* we don't place acls on raw sockets, and sometimes
70998+ dgram/ip sockets are opened for ioctl and not
70999+ bind/connect, so we'll fake a bind learn log */
71000+ if (type == SOCK_RAW || type == SOCK_PACKET) {
71001+ __u32 fakeip = 0;
71002+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
71003+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
71004+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
71005+ gr_to_filename(current->exec_file->f_path.dentry,
71006+ current->exec_file->f_path.mnt) :
71007+ curr->filename, curr->filename,
71008+ &fakeip, 0, type,
71009+ protocol, GR_CONNECT, &current->signal->saved_ip);
71010+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
71011+ __u32 fakeip = 0;
71012+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
71013+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
71014+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
71015+ gr_to_filename(current->exec_file->f_path.dentry,
71016+ current->exec_file->f_path.mnt) :
71017+ curr->filename, curr->filename,
71018+ &fakeip, 0, type,
71019+ protocol, GR_BIND, &current->signal->saved_ip);
71020+ }
71021+ /* we'll log when they use connect or bind */
71022+ goto exit;
71023+ }
71024+
71025+exit_fail:
71026+ if (domain == PF_INET)
71027+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
71028+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
71029+ else if (rcu_access_pointer(net_families[domain]) != NULL)
71030+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
71031+ gr_socktype_to_name(type), protocol);
71032+
71033+ return 0;
71034+exit:
71035+ return 1;
71036+}
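/*
 * Editor's sketch (not part of the patch): the word/bit indexing used by
 * gr_search_socket(), i.e. sock_families[domain / 32] & (1U << (domain % 32)),
 * written as a reusable pair of helpers over an array of 32-bit words.
 */
#include <stdio.h>

static void bitmap_set(unsigned int *map, int n)
{
	map[n / 32] |= 1U << (n % 32);
}

static int bitmap_test(const unsigned int *map, int n)
{
	return !!(map[n / 32] & (1U << (n % 32)));
}

int main(void)
{
	unsigned int families[8] = { 0 };	/* room for 256 flags */

	bitmap_set(families, 2);	/* e.g. PF_INET == 2 */
	printf("family 2 allowed: %d, family 10 allowed: %d\n",
	       bitmap_test(families, 2), bitmap_test(families, 10));
	return 0;
}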
71037+
71038+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
71039+{
71040+ if ((ip->mode & mode) &&
71041+ (ip_port >= ip->low) &&
71042+ (ip_port <= ip->high) &&
71043+ ((ntohl(ip_addr) & our_netmask) ==
71044+ (ntohl(our_addr) & our_netmask))
71045+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
71046+ && (ip->type & (1U << type))) {
71047+ if (ip->mode & GR_INVERT)
71048+ return 2; // specifically denied
71049+ else
71050+ return 1; // allowed
71051+ }
71052+
71053+ return 0; // not specifically allowed, may continue parsing
71054+}
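
check_ip_policy() is deliberately tri-state: 1 means an allow rule matched, 2 means a GR_INVERT rule matched (specifically denied), and 0 tells the caller to keep scanning the remaining rules. The address test it applies is a plain masked comparison: addresses arrive in network byte order and are converted with ntohl(), while the netmask is kept in host order. A standalone sketch of just that comparison, where addr_matches() and the sample addresses are made up for illustration:

#include <stdio.h>
#include <arpa/inet.h>

/* simplified stand-in for one ACL rule's address match:
   both addresses arrive in network byte order, the mask is host order */
static int addr_matches(unsigned int ip_addr, unsigned int rule_addr, unsigned int netmask)
{
	return (ntohl(ip_addr) & netmask) == (ntohl(rule_addr) & netmask);
}

int main(void)
{
	unsigned int rule = htonl(0xC0A80100); /* 192.168.1.0 */
	unsigned int mask = 0xFFFFFF00;        /* /24, host order */

	printf("%d\n", addr_matches(htonl(0xC0A80142), rule, mask)); /* 192.168.1.66 -> 1 */
	printf("%d\n", addr_matches(htonl(0xC0A80242), rule, mask)); /* 192.168.2.66 -> 0 */
	return 0;
}
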
71055+
71056+static int
71057+gr_search_connectbind(const int full_mode, struct sock *sk,
71058+ struct sockaddr_in *addr, const int type)
71059+{
71060+ char iface[IFNAMSIZ] = {0};
71061+ struct acl_subject_label *curr;
71062+ struct acl_ip_label *ip;
71063+ struct inet_sock *isk;
71064+ struct net_device *dev;
71065+ struct in_device *idev;
71066+ unsigned long i;
71067+ int ret;
71068+ int mode = full_mode & (GR_BIND | GR_CONNECT);
71069+ __u32 ip_addr = 0;
71070+ __u32 our_addr;
71071+ __u32 our_netmask;
71072+ char *p;
71073+ __u16 ip_port = 0;
71074+ const struct cred *cred = current_cred();
71075+
71076+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
71077+ return 0;
71078+
71079+ curr = current->acl;
71080+ isk = inet_sk(sk);
71081+
71082+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
71083+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
71084+ addr->sin_addr.s_addr = curr->inaddr_any_override;
71085+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
71086+ struct sockaddr_in saddr;
71087+ int err;
71088+
71089+ saddr.sin_family = AF_INET;
71090+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
71091+ saddr.sin_port = isk->inet_sport;
71092+
71093+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
71094+ if (err)
71095+ return err;
71096+
71097+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
71098+ if (err)
71099+ return err;
71100+ }
71101+
71102+ if (!curr->ips)
71103+ return 0;
71104+
71105+ ip_addr = addr->sin_addr.s_addr;
71106+ ip_port = ntohs(addr->sin_port);
71107+
71108+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
71109+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
71110+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
71111+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
71112+ gr_to_filename(current->exec_file->f_path.dentry,
71113+ current->exec_file->f_path.mnt) :
71114+ curr->filename, curr->filename,
71115+ &ip_addr, ip_port, type,
71116+ sk->sk_protocol, mode, &current->signal->saved_ip);
71117+ return 0;
71118+ }
71119+
71120+ for (i = 0; i < curr->ip_num; i++) {
71121+ ip = *(curr->ips + i);
71122+ if (ip->iface != NULL) {
71123+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
71124+ p = strchr(iface, ':');
71125+ if (p != NULL)
71126+ *p = '\0';
71127+ dev = dev_get_by_name(sock_net(sk), iface);
71128+ if (dev == NULL)
71129+ continue;
71130+ idev = in_dev_get(dev);
71131+ if (idev == NULL) {
71132+ dev_put(dev);
71133+ continue;
71134+ }
71135+ rcu_read_lock();
71136+ for_ifa(idev) {
71137+ if (!strcmp(ip->iface, ifa->ifa_label)) {
71138+ our_addr = ifa->ifa_address;
71139+ our_netmask = 0xffffffff;
71140+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
71141+ if (ret == 1) {
71142+ rcu_read_unlock();
71143+ in_dev_put(idev);
71144+ dev_put(dev);
71145+ return 0;
71146+ } else if (ret == 2) {
71147+ rcu_read_unlock();
71148+ in_dev_put(idev);
71149+ dev_put(dev);
71150+ goto denied;
71151+ }
71152+ }
71153+ } endfor_ifa(idev);
71154+ rcu_read_unlock();
71155+ in_dev_put(idev);
71156+ dev_put(dev);
71157+ } else {
71158+ our_addr = ip->addr;
71159+ our_netmask = ip->netmask;
71160+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
71161+ if (ret == 1)
71162+ return 0;
71163+ else if (ret == 2)
71164+ goto denied;
71165+ }
71166+ }
71167+
71168+denied:
71169+ if (mode == GR_BIND)
71170+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
71171+ else if (mode == GR_CONNECT)
71172+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
71173+
71174+ return -EACCES;
71175+}
71176+
71177+int
71178+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
71179+{
71180+ /* always allow disconnection of dgram sockets with connect */
71181+ if (addr->sin_family == AF_UNSPEC)
71182+ return 0;
71183+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
71184+}
71185+
71186+int
71187+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
71188+{
71189+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
71190+}
71191+
71192+int gr_search_listen(struct socket *sock)
71193+{
71194+ struct sock *sk = sock->sk;
71195+ struct sockaddr_in addr;
71196+
71197+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
71198+ addr.sin_port = inet_sk(sk)->inet_sport;
71199+
71200+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
71201+}
71202+
71203+int gr_search_accept(struct socket *sock)
71204+{
71205+ struct sock *sk = sock->sk;
71206+ struct sockaddr_in addr;
71207+
71208+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
71209+ addr.sin_port = inet_sk(sk)->inet_sport;
71210+
71211+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
71212+}
71213+
71214+int
71215+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
71216+{
71217+ if (addr)
71218+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
71219+ else {
71220+ struct sockaddr_in sin;
71221+ const struct inet_sock *inet = inet_sk(sk);
71222+
71223+ sin.sin_addr.s_addr = inet->inet_daddr;
71224+ sin.sin_port = inet->inet_dport;
71225+
71226+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
71227+ }
71228+}
71229+
71230+int
71231+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
71232+{
71233+ struct sockaddr_in sin;
71234+
71235+ if (unlikely(skb->len < sizeof (struct udphdr)))
71236+ return 0; // skip this packet
71237+
71238+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
71239+ sin.sin_port = udp_hdr(skb)->source;
71240+
71241+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
71242+}
71243diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
71244new file mode 100644
71245index 0000000..25f54ef
71246--- /dev/null
71247+++ b/grsecurity/gracl_learn.c
71248@@ -0,0 +1,207 @@
71249+#include <linux/kernel.h>
71250+#include <linux/mm.h>
71251+#include <linux/sched.h>
71252+#include <linux/poll.h>
71253+#include <linux/string.h>
71254+#include <linux/file.h>
71255+#include <linux/types.h>
71256+#include <linux/vmalloc.h>
71257+#include <linux/grinternal.h>
71258+
71259+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
71260+ size_t count, loff_t *ppos);
71261+extern int gr_acl_is_enabled(void);
71262+
71263+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
71264+static int gr_learn_attached;
71265+
71266+/* use a 512k buffer */
71267+#define LEARN_BUFFER_SIZE (512 * 1024)
71268+
71269+static DEFINE_SPINLOCK(gr_learn_lock);
71270+static DEFINE_MUTEX(gr_learn_user_mutex);
71271+
71272+/* we need to maintain two buffers: the kernel context of grlearn
71273+ takes a mutex around the copy out to userspace, while the other kernel
71274+ contexts take a spinlock when copying into the buffer, since they cannot sleep
71275+*/
71276+static char *learn_buffer;
71277+static char *learn_buffer_user;
71278+static int learn_buffer_len;
71279+static int learn_buffer_user_len;
71280+
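A minimal userspace analogue of the two-buffer scheme described above, with pthread primitives standing in for the kernel spinlock/mutex pair; every name here is illustrative and capacity handling is elided (the kernel version leaves a 16K gap instead):

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define BUF_SZ 4096

static pthread_mutex_t consumer_lock = PTHREAD_MUTEX_INITIALIZER; /* plays the mutex role */
static pthread_spinlock_t produce_lock;                           /* plays the spinlock role */
static char buffer[BUF_SZ];
static int buffer_len;
static char snapshot[BUF_SZ];

/* producers may not sleep: they only ever take the spinlock */
static void produce(const char *msg)
{
	pthread_spin_lock(&produce_lock);
	int n = snprintf(buffer + buffer_len, sizeof(buffer) - buffer_len, "%s", msg);
	if (n > 0)
		buffer_len += n + 1; /* keep the NUL, like gr_add_learn_entry() */
	pthread_spin_unlock(&produce_lock);
}

/* the single consumer snapshots under the spinlock, then does the slow,
   sleepable work on its private copy while holding only the mutex */
static int consume(void)
{
	int len;

	pthread_mutex_lock(&consumer_lock);
	pthread_spin_lock(&produce_lock);
	len = buffer_len;
	memcpy(snapshot, buffer, len);
	buffer_len = 0;
	pthread_spin_unlock(&produce_lock);
	/* ... copy_to_user-like work on snapshot would happen here ... */
	pthread_mutex_unlock(&consumer_lock);
	return len;
}

int main(void)
{
	pthread_spin_init(&produce_lock, PTHREAD_PROCESS_PRIVATE);
	produce("subject /bin/foo");
	printf("drained %d bytes\n", consume());
	return 0;
}
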
71281+static ssize_t
71282+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
71283+{
71284+ DECLARE_WAITQUEUE(wait, current);
71285+ ssize_t retval = 0;
71286+
71287+ add_wait_queue(&learn_wait, &wait);
71288+ set_current_state(TASK_INTERRUPTIBLE);
71289+ do {
71290+ mutex_lock(&gr_learn_user_mutex);
71291+ spin_lock(&gr_learn_lock);
71292+ if (learn_buffer_len)
71293+ break;
71294+ spin_unlock(&gr_learn_lock);
71295+ mutex_unlock(&gr_learn_user_mutex);
71296+ if (file->f_flags & O_NONBLOCK) {
71297+ retval = -EAGAIN;
71298+ goto out;
71299+ }
71300+ if (signal_pending(current)) {
71301+ retval = -ERESTARTSYS;
71302+ goto out;
71303+ }
71304+
71305+ schedule();
71306+ } while (1);
71307+
71308+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
71309+ learn_buffer_user_len = learn_buffer_len;
71310+ retval = learn_buffer_len;
71311+ learn_buffer_len = 0;
71312+
71313+ spin_unlock(&gr_learn_lock);
71314+
71315+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
71316+ retval = -EFAULT;
71317+
71318+ mutex_unlock(&gr_learn_user_mutex);
71319+out:
71320+ set_current_state(TASK_RUNNING);
71321+ remove_wait_queue(&learn_wait, &wait);
71322+ return retval;
71323+}
71324+
71325+static unsigned int
71326+poll_learn(struct file * file, poll_table * wait)
71327+{
71328+ poll_wait(file, &learn_wait, wait);
71329+
71330+ if (learn_buffer_len)
71331+ return (POLLIN | POLLRDNORM);
71332+
71333+ return 0;
71334+}
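
On the other side of this interface, a learn daemon would block in poll() until records arrive and then drain them with read(). A hedged sketch of such a consumer, assuming the character device is exposed as /dev/grsec; the path and single-reader usage are assumptions for illustration (open_learn() below returns -EBUSY to a second reader):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/grsec", O_RDONLY); /* path is an assumption */
	static char buf[512 * 1024];           /* matches LEARN_BUFFER_SIZE */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & POLLIN) {
			ssize_t n = read(fd, buf, sizeof(buf));

			if (n <= 0)
				break;
			fwrite(buf, 1, (size_t)n, stdout); /* NUL-separated learn records */
		}
	}
	close(fd);
	return 0;
}
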
71335+
71336+void
71337+gr_clear_learn_entries(void)
71338+{
71339+ char *tmp;
71340+
71341+ mutex_lock(&gr_learn_user_mutex);
71342+ spin_lock(&gr_learn_lock);
71343+ tmp = learn_buffer;
71344+ learn_buffer = NULL;
71345+ spin_unlock(&gr_learn_lock);
71346+ if (tmp)
71347+ vfree(tmp);
71348+ if (learn_buffer_user != NULL) {
71349+ vfree(learn_buffer_user);
71350+ learn_buffer_user = NULL;
71351+ }
71352+ learn_buffer_len = 0;
71353+ mutex_unlock(&gr_learn_user_mutex);
71354+
71355+ return;
71356+}
71357+
71358+void
71359+gr_add_learn_entry(const char *fmt, ...)
71360+{
71361+ va_list args;
71362+ unsigned int len;
71363+
71364+ if (!gr_learn_attached)
71365+ return;
71366+
71367+ spin_lock(&gr_learn_lock);
71368+
71369+ /* leave a gap at the end so we know when it's "full" but don't have to
71370+ compute the exact length of the string we're trying to append
71371+ */
71372+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
71373+ spin_unlock(&gr_learn_lock);
71374+ wake_up_interruptible(&learn_wait);
71375+ return;
71376+ }
71377+ if (learn_buffer == NULL) {
71378+ spin_unlock(&gr_learn_lock);
71379+ return;
71380+ }
71381+
71382+ va_start(args, fmt);
71383+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
71384+ va_end(args);
71385+
71386+ learn_buffer_len += len + 1;
71387+
71388+ spin_unlock(&gr_learn_lock);
71389+ wake_up_interruptible(&learn_wait);
71390+
71391+ return;
71392+}
71393+
71394+static int
71395+open_learn(struct inode *inode, struct file *file)
71396+{
71397+ if (file->f_mode & FMODE_READ && gr_learn_attached)
71398+ return -EBUSY;
71399+ if (file->f_mode & FMODE_READ) {
71400+ int retval = 0;
71401+ mutex_lock(&gr_learn_user_mutex);
71402+ if (learn_buffer == NULL)
71403+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
71404+ if (learn_buffer_user == NULL)
71405+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
71406+ if (learn_buffer == NULL) {
71407+ retval = -ENOMEM;
71408+ goto out_error;
71409+ }
71410+ if (learn_buffer_user == NULL) {
71411+ retval = -ENOMEM;
71412+ goto out_error;
71413+ }
71414+ learn_buffer_len = 0;
71415+ learn_buffer_user_len = 0;
71416+ gr_learn_attached = 1;
71417+out_error:
71418+ mutex_unlock(&gr_learn_user_mutex);
71419+ return retval;
71420+ }
71421+ return 0;
71422+}
71423+
71424+static int
71425+close_learn(struct inode *inode, struct file *file)
71426+{
71427+ if (file->f_mode & FMODE_READ) {
71428+ char *tmp = NULL;
71429+ mutex_lock(&gr_learn_user_mutex);
71430+ spin_lock(&gr_learn_lock);
71431+ tmp = learn_buffer;
71432+ learn_buffer = NULL;
71433+ spin_unlock(&gr_learn_lock);
71434+ if (tmp)
71435+ vfree(tmp);
71436+ if (learn_buffer_user != NULL) {
71437+ vfree(learn_buffer_user);
71438+ learn_buffer_user = NULL;
71439+ }
71440+ learn_buffer_len = 0;
71441+ learn_buffer_user_len = 0;
71442+ gr_learn_attached = 0;
71443+ mutex_unlock(&gr_learn_user_mutex);
71444+ }
71445+
71446+ return 0;
71447+}
71448+
71449+const struct file_operations grsec_fops = {
71450+ .read = read_learn,
71451+ .write = write_grsec_handler,
71452+ .open = open_learn,
71453+ .release = close_learn,
71454+ .poll = poll_learn,
71455+};
71456diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
71457new file mode 100644
71458index 0000000..361a099
71459--- /dev/null
71460+++ b/grsecurity/gracl_policy.c
71461@@ -0,0 +1,1782 @@
71462+#include <linux/kernel.h>
71463+#include <linux/module.h>
71464+#include <linux/sched.h>
71465+#include <linux/mm.h>
71466+#include <linux/file.h>
71467+#include <linux/fs.h>
71468+#include <linux/namei.h>
71469+#include <linux/mount.h>
71470+#include <linux/tty.h>
71471+#include <linux/proc_fs.h>
71472+#include <linux/lglock.h>
71473+#include <linux/slab.h>
71474+#include <linux/vmalloc.h>
71475+#include <linux/types.h>
71476+#include <linux/sysctl.h>
71477+#include <linux/netdevice.h>
71478+#include <linux/ptrace.h>
71479+#include <linux/gracl.h>
71480+#include <linux/gralloc.h>
71481+#include <linux/security.h>
71482+#include <linux/grinternal.h>
71483+#include <linux/pid_namespace.h>
71484+#include <linux/stop_machine.h>
71485+#include <linux/fdtable.h>
71486+#include <linux/percpu.h>
71488+#include <linux/hugetlb.h>
71489+#include <linux/posix-timers.h>
71490+#include "../fs/mount.h"
71491+
71492+#include <asm/uaccess.h>
71493+#include <asm/errno.h>
71494+#include <asm/mman.h>
71495+
71496+extern struct gr_policy_state *polstate;
71497+
71498+#define FOR_EACH_ROLE_START(role) \
71499+ role = polstate->role_list; \
71500+ while (role) {
71501+
71502+#define FOR_EACH_ROLE_END(role) \
71503+ role = role->prev; \
71504+ }
71505+
71506+struct path gr_real_root;
71507+
71508+extern struct gr_alloc_state *current_alloc_state;
71509+
71510+u16 acl_sp_role_value;
71511+
71512+static DEFINE_MUTEX(gr_dev_mutex);
71513+
71514+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
71515+extern void gr_clear_learn_entries(void);
71516+
71517+static struct gr_arg gr_usermode;
71518+static unsigned char gr_system_salt[GR_SALT_LEN];
71519+static unsigned char gr_system_sum[GR_SHA_LEN];
71520+
71521+static unsigned int gr_auth_attempts = 0;
71522+static unsigned long gr_auth_expires = 0UL;
71523+
71524+struct acl_object_label *fakefs_obj_rw;
71525+struct acl_object_label *fakefs_obj_rwx;
71526+
71527+extern int gr_init_uidset(void);
71528+extern void gr_free_uidset(void);
71529+extern void gr_remove_uid(uid_t uid);
71530+extern int gr_find_uid(uid_t uid);
71531+
71532+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
71533+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
71534+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
71535+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
71536+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
71537+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
71538+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
71539+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
71540+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
71541+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
71542+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
71543+extern void assign_special_role(const char *rolename);
71544+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
71545+extern int gr_rbac_disable(void *unused);
71546+extern void gr_enable_rbac_system(void);
71547+
71548+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
71549+{
71550+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
71551+ return -EFAULT;
71552+
71553+ return 0;
71554+}
71555+
71556+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
71557+{
71558+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
71559+ return -EFAULT;
71560+
71561+ return 0;
71562+}
71563+
71564+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
71565+{
71566+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
71567+ return -EFAULT;
71568+
71569+ return 0;
71570+}
71571+
71572+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
71573+{
71574+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
71575+ return -EFAULT;
71576+
71577+ return 0;
71578+}
71579+
71580+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
71581+{
71582+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
71583+ return -EFAULT;
71584+
71585+ return 0;
71586+}
71587+
71588+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
71589+{
71590+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
71591+ return -EFAULT;
71592+
71593+ return 0;
71594+}
71595+
71596+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
71597+{
71598+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
71599+ return -EFAULT;
71600+
71601+ return 0;
71602+}
71603+
71604+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
71605+{
71606+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
71607+ return -EFAULT;
71608+
71609+ return 0;
71610+}
71611+
71612+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
71613+{
71614+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
71615+ return -EFAULT;
71616+
71617+ return 0;
71618+}
71619+
71620+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
71621+{
71622+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
71623+ return -EFAULT;
71624+
71625+ if (((uwrap->version != GRSECURITY_VERSION) &&
71626+ (uwrap->version != 0x2901)) ||
71627+ (uwrap->size != sizeof(struct gr_arg)))
71628+ return -EINVAL;
71629+
71630+ return 0;
71631+}
71632+
71633+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
71634+{
71635+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
71636+ return -EFAULT;
71637+
71638+ return 0;
71639+}
71640+
71641+static size_t get_gr_arg_wrapper_size_normal(void)
71642+{
71643+ return sizeof(struct gr_arg_wrapper);
71644+}
71645+
71646+#ifdef CONFIG_COMPAT
71647+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
71648+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
71649+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
71650+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
71651+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
71652+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
71653+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
71654+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
71655+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
71656+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
71657+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
71658+extern size_t get_gr_arg_wrapper_size_compat(void);
71659+
71660+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
71661+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
71662+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
71663+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
71664+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
71665+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
71666+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
71667+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
71668+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
71669+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
71670+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
71671+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
71672+
71673+#else
71674+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
71675+#define copy_gr_arg copy_gr_arg_normal
71676+#define copy_gr_hash_struct copy_gr_hash_struct_normal
71677+#define copy_acl_object_label copy_acl_object_label_normal
71678+#define copy_acl_subject_label copy_acl_subject_label_normal
71679+#define copy_acl_role_label copy_acl_role_label_normal
71680+#define copy_acl_ip_label copy_acl_ip_label_normal
71681+#define copy_pointer_from_array copy_pointer_from_array_normal
71682+#define copy_sprole_pw copy_sprole_pw_normal
71683+#define copy_role_transition copy_role_transition_normal
71684+#define copy_role_allowed_ip copy_role_allowed_ip_normal
71685+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
71686+#endif
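
The block above picks a dispatch strategy at build time: with CONFIG_COMPAT the copy_* names are function pointers, selected per caller once the ABI is known (as the truncated write_grsec_handler() at the end of this section starts to show), while without it they collapse into the _normal helpers via macros at zero cost. A reduced sketch of the same pattern; struct arg, copy_arg and set_compat are invented for illustration:

#include <stdio.h>

struct arg { int v; };

static int copy_arg_normal(struct arg *dst, const struct arg *src)
{
	*dst = *src;     /* native-layout copy */
	return 0;
}

#ifdef CONFIG_COMPAT
static int copy_arg_compat(struct arg *dst, const struct arg *src)
{
	dst->v = src->v; /* a real version would translate the 32-bit layout */
	return 0;
}

/* one pointer, set once the caller's ABI is known */
static int (*copy_arg)(struct arg *, const struct arg *) = copy_arg_normal;

static void set_compat(int is_compat)
{
	copy_arg = is_compat ? copy_arg_compat : copy_arg_normal;
}
#else
#define copy_arg copy_arg_normal /* zero-cost when only one ABI exists */
#endif

int main(void)
{
	struct arg in = { 42 }, out;
#ifdef CONFIG_COMPAT
	set_compat(0);
#endif
	copy_arg(&out, &in);
	printf("%d\n", out.v);
	return 0;
}
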
71687+
71688+static struct acl_subject_label *
71689+lookup_subject_map(const struct acl_subject_label *userp)
71690+{
71691+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
71692+ struct subject_map *match;
71693+
71694+ match = polstate->subj_map_set.s_hash[index];
71695+
71696+ while (match && match->user != userp)
71697+ match = match->next;
71698+
71699+ if (match != NULL)
71700+ return match->kernel;
71701+ else
71702+ return NULL;
71703+}
71704+
71705+static void
71706+insert_subj_map_entry(struct subject_map *subjmap)
71707+{
71708+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
71709+ struct subject_map **curr;
71710+
71711+ subjmap->prev = NULL;
71712+
71713+ curr = &polstate->subj_map_set.s_hash[index];
71714+ if (*curr != NULL)
71715+ (*curr)->prev = subjmap;
71716+
71717+ subjmap->next = *curr;
71718+ *curr = subjmap;
71719+
71720+ return;
71721+}
71722+
71723+static void
71724+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
71725+{
71726+ unsigned int index =
71727+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
71728+ struct acl_role_label **curr;
71729+ struct acl_role_label *tmp, *tmp2;
71730+
71731+ curr = &polstate->acl_role_set.r_hash[index];
71732+
71733+ /* simple case, slot is empty, just set it to our role */
71734+ if (*curr == NULL) {
71735+ *curr = role;
71736+ } else {
71737+ /* example:
71738+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
71739+ 2 -> 3
71740+ */
71741+ /* first check to see if we can already be reached via this slot */
71742+ tmp = *curr;
71743+ while (tmp && tmp != role)
71744+ tmp = tmp->next;
71745+ if (tmp == role) {
71746+ /* we don't need to add ourselves to this slot's chain */
71747+ return;
71748+ }
71749+ /* we need to add ourselves to this chain, two cases */
71750+ if (role->next == NULL) {
71751+ /* simple case, append the current chain to our role */
71752+ role->next = *curr;
71753+ *curr = role;
71754+ } else {
71755+ /* 1 -> 2 -> 3 -> 4
71756+ 2 -> 3 -> 4
71757+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
71758+ */
71759+ /* trickier case: walk our role's chain until we find
71760+ the role for the start of the current slot's chain */
71761+ tmp = role;
71762+ tmp2 = *curr;
71763+ while (tmp->next && tmp->next != tmp2)
71764+ tmp = tmp->next;
71765+ if (tmp->next == tmp2) {
71766+ /* from example above, we found 3, so just
71767+ replace this slot's chain with ours */
71768+ *curr = role;
71769+ } else {
71770+ /* we didn't find a subset of our role's chain
71771+ in the current slot's chain, so append their
71772+ chain to ours, and set us as the first role in
71773+ the slot's chain
71774+
71775+ we could fold this case with the case above,
71776+ but making it explicit for clarity
71777+ */
71778+ tmp->next = tmp2;
71779+ *curr = role;
71780+ }
71781+ }
71782+ }
71783+
71784+ return;
71785+}
71786+
71787+static void
71788+insert_acl_role_label(struct acl_role_label *role)
71789+{
71790+ int i;
71791+
71792+ if (polstate->role_list == NULL) {
71793+ polstate->role_list = role;
71794+ role->prev = NULL;
71795+ } else {
71796+ role->prev = polstate->role_list;
71797+ polstate->role_list = role;
71798+ }
71799+
71800+ /* used for hash chains */
71801+ role->next = NULL;
71802+
71803+ if (role->roletype & GR_ROLE_DOMAIN) {
71804+ for (i = 0; i < role->domain_child_num; i++)
71805+ __insert_acl_role_label(role, role->domain_children[i]);
71806+ } else
71807+ __insert_acl_role_label(role, role->uidgid);
71808+}
71809+
71810+static int
71811+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
71812+{
71813+ struct name_entry **curr, *nentry;
71814+ struct inodev_entry *ientry;
71815+ unsigned int len = strlen(name);
71816+ unsigned int key = full_name_hash(name, len);
71817+ unsigned int index = key % polstate->name_set.n_size;
71818+
71819+ curr = &polstate->name_set.n_hash[index];
71820+
71821+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
71822+ curr = &((*curr)->next);
71823+
71824+ if (*curr != NULL)
71825+ return 1;
71826+
71827+ nentry = acl_alloc(sizeof (struct name_entry));
71828+ if (nentry == NULL)
71829+ return 0;
71830+ ientry = acl_alloc(sizeof (struct inodev_entry));
71831+ if (ientry == NULL)
71832+ return 0;
71833+ ientry->nentry = nentry;
71834+
71835+ nentry->key = key;
71836+ nentry->name = name;
71837+ nentry->inode = inode;
71838+ nentry->device = device;
71839+ nentry->len = len;
71840+ nentry->deleted = deleted;
71841+
71842+ nentry->prev = NULL;
71843+ curr = &polstate->name_set.n_hash[index];
71844+ if (*curr != NULL)
71845+ (*curr)->prev = nentry;
71846+ nentry->next = *curr;
71847+ *curr = nentry;
71848+
71849+ /* insert us into the table searchable by inode/dev */
71850+ __insert_inodev_entry(polstate, ientry);
71851+
71852+ return 1;
71853+}
71854+
71855+/* we allocate chained hash tables, whose optimal size is where the load factor lambda is ~ 1 */
71856+
71857+static void *
71858+create_table(__u32 * len, int elementsize)
71859+{
71860+ unsigned int table_sizes[] = {
71861+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
71862+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
71863+ 4194301, 8388593, 16777213, 33554393, 67108859
71864+ };
71865+ void *newtable = NULL;
71866+ unsigned int pwr = 0;
71867+
71868+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
71869+ table_sizes[pwr] <= *len)
71870+ pwr++;
71871+
71872+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
71873+ return newtable;
71874+
71875+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
71876+ newtable =
71877+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
71878+ else
71879+ newtable = vmalloc(table_sizes[pwr] * elementsize);
71880+
71881+ *len = table_sizes[pwr];
71882+
71883+ return newtable;
71884+}
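
As a worked example, asking create_table() for at least 1000 slots walks the prime list past 509 and settles on 1021, for a load factor of about 1000/1021 ~ 0.98, in line with the lambda ~ 1 note above. A standalone sketch of just the size selection, with a truncated prime list and pick_size() as a stand-in name:

#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
};

/* pick the smallest listed prime strictly greater than len, as create_table() does */
static unsigned int pick_size(unsigned int len)
{
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u\n", pick_size(1000)); /* 1021 */
	printf("%u\n", pick_size(5));    /* 7 */
	return 0;
}
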
71885+
71886+static int
71887+init_variables(const struct gr_arg *arg, bool reload)
71888+{
71889+ struct task_struct *reaper = init_pid_ns.child_reaper;
71890+ unsigned int stacksize;
71891+
71892+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
71893+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
71894+ polstate->name_set.n_size = arg->role_db.num_objects;
71895+ polstate->inodev_set.i_size = arg->role_db.num_objects;
71896+
71897+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
71898+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
71899+ return 1;
71900+
71901+ if (!reload) {
71902+ if (!gr_init_uidset())
71903+ return 1;
71904+ }
71905+
71906+ /* set up the stack that holds allocation info */
71907+
71908+ stacksize = arg->role_db.num_pointers + 5;
71909+
71910+ if (!acl_alloc_stack_init(stacksize))
71911+ return 1;
71912+
71913+ if (!reload) {
71914+ /* grab reference for the real root dentry and vfsmount */
71915+ get_fs_root(reaper->fs, &gr_real_root);
71916+
71917+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71918+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
71919+#endif
71920+
71921+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
71922+ if (fakefs_obj_rw == NULL)
71923+ return 1;
71924+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
71925+
71926+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
71927+ if (fakefs_obj_rwx == NULL)
71928+ return 1;
71929+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
71930+ }
71931+
71932+ polstate->subj_map_set.s_hash =
71933+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
71934+ polstate->acl_role_set.r_hash =
71935+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
71936+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
71937+ polstate->inodev_set.i_hash =
71938+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
71939+
71940+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
71941+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
71942+ return 1;
71943+
71944+ memset(polstate->subj_map_set.s_hash, 0,
71945+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
71946+ memset(polstate->acl_role_set.r_hash, 0,
71947+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
71948+ memset(polstate->name_set.n_hash, 0,
71949+ sizeof (struct name_entry *) * polstate->name_set.n_size);
71950+ memset(polstate->inodev_set.i_hash, 0,
71951+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
71952+
71953+ return 0;
71954+}
71955+
71956+/* free information not needed after startup;
71957+ currently this holds only the user -> kernel pointer mappings for subjects
71958+*/
71959+
71960+static void
71961+free_init_variables(void)
71962+{
71963+ __u32 i;
71964+
71965+ if (polstate->subj_map_set.s_hash) {
71966+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
71967+ if (polstate->subj_map_set.s_hash[i]) {
71968+ kfree(polstate->subj_map_set.s_hash[i]);
71969+ polstate->subj_map_set.s_hash[i] = NULL;
71970+ }
71971+ }
71972+
71973+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
71974+ PAGE_SIZE)
71975+ kfree(polstate->subj_map_set.s_hash);
71976+ else
71977+ vfree(polstate->subj_map_set.s_hash);
71978+ }
71979+
71980+ return;
71981+}
71982+
71983+static void
71984+free_variables(bool reload)
71985+{
71986+ struct acl_subject_label *s;
71987+ struct acl_role_label *r;
71988+ struct task_struct *task, *task2;
71989+ unsigned int x;
71990+
71991+ if (!reload) {
71992+ gr_clear_learn_entries();
71993+
71994+ read_lock(&tasklist_lock);
71995+ do_each_thread(task2, task) {
71996+ task->acl_sp_role = 0;
71997+ task->acl_role_id = 0;
71998+ task->inherited = 0;
71999+ task->acl = NULL;
72000+ task->role = NULL;
72001+ } while_each_thread(task2, task);
72002+ read_unlock(&tasklist_lock);
72003+
72004+ kfree(fakefs_obj_rw);
72005+ fakefs_obj_rw = NULL;
72006+ kfree(fakefs_obj_rwx);
72007+ fakefs_obj_rwx = NULL;
72008+
72009+ /* release the reference to the real root dentry and vfsmount */
72010+ path_put(&gr_real_root);
72011+ memset(&gr_real_root, 0, sizeof(gr_real_root));
72012+ }
72013+
72014+ /* free all object hash tables */
72015+
72016+ FOR_EACH_ROLE_START(r)
72017+ if (r->subj_hash == NULL)
72018+ goto next_role;
72019+ FOR_EACH_SUBJECT_START(r, s, x)
72020+ if (s->obj_hash == NULL)
72021+ break;
72022+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
72023+ kfree(s->obj_hash);
72024+ else
72025+ vfree(s->obj_hash);
72026+ FOR_EACH_SUBJECT_END(s, x)
72027+ FOR_EACH_NESTED_SUBJECT_START(r, s)
72028+ if (s->obj_hash == NULL)
72029+ break;
72030+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
72031+ kfree(s->obj_hash);
72032+ else
72033+ vfree(s->obj_hash);
72034+ FOR_EACH_NESTED_SUBJECT_END(s)
72035+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
72036+ kfree(r->subj_hash);
72037+ else
72038+ vfree(r->subj_hash);
72039+ r->subj_hash = NULL;
72040+next_role:
72041+ FOR_EACH_ROLE_END(r)
72042+
72043+ acl_free_all();
72044+
72045+ if (polstate->acl_role_set.r_hash) {
72046+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
72047+ PAGE_SIZE)
72048+ kfree(polstate->acl_role_set.r_hash);
72049+ else
72050+ vfree(polstate->acl_role_set.r_hash);
72051+ }
72052+ if (polstate->name_set.n_hash) {
72053+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
72054+ PAGE_SIZE)
72055+ kfree(polstate->name_set.n_hash);
72056+ else
72057+ vfree(polstate->name_set.n_hash);
72058+ }
72059+
72060+ if (polstate->inodev_set.i_hash) {
72061+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
72062+ PAGE_SIZE)
72063+ kfree(polstate->inodev_set.i_hash);
72064+ else
72065+ vfree(polstate->inodev_set.i_hash);
72066+ }
72067+
72068+ if (!reload)
72069+ gr_free_uidset();
72070+
72071+ memset(&polstate->name_set, 0, sizeof (struct name_db));
72072+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
72073+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
72074+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
72075+
72076+ polstate->default_role = NULL;
72077+ polstate->kernel_role = NULL;
72078+ polstate->role_list = NULL;
72079+
72080+ return;
72081+}
72082+
72083+static struct acl_subject_label *
72084+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
72085+
72086+static int alloc_and_copy_string(char **name, unsigned int maxlen)
72087+{
72088+ unsigned int len = strnlen_user(*name, maxlen);
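72088+ /* strnlen_user() counts the terminating NUL, so len includes it; the
72088+ copy below brings over len bytes and tmp[len-1] is then forced to NUL */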
72089+ char *tmp;
72090+
72091+ if (!len || len >= maxlen)
72092+ return -EINVAL;
72093+
72094+ if ((tmp = (char *) acl_alloc(len)) == NULL)
72095+ return -ENOMEM;
72096+
72097+ if (copy_from_user(tmp, *name, len))
72098+ return -EFAULT;
72099+
72100+ tmp[len-1] = '\0';
72101+ *name = tmp;
72102+
72103+ return 0;
72104+}
72105+
72106+static int
72107+copy_user_glob(struct acl_object_label *obj)
72108+{
72109+ struct acl_object_label *g_tmp, **guser;
72110+ int error;
72111+
72112+ if (obj->globbed == NULL)
72113+ return 0;
72114+
72115+ guser = &obj->globbed;
72116+ while (*guser) {
72117+ g_tmp = (struct acl_object_label *)
72118+ acl_alloc(sizeof (struct acl_object_label));
72119+ if (g_tmp == NULL)
72120+ return -ENOMEM;
72121+
72122+ if (copy_acl_object_label(g_tmp, *guser))
72123+ return -EFAULT;
72124+
72125+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
72126+ if (error)
72127+ return error;
72128+
72129+ *guser = g_tmp;
72130+ guser = &(g_tmp->next);
72131+ }
72132+
72133+ return 0;
72134+}
72135+
72136+static int
72137+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
72138+ struct acl_role_label *role)
72139+{
72140+ struct acl_object_label *o_tmp;
72141+ int ret;
72142+
72143+ while (userp) {
72144+ if ((o_tmp = (struct acl_object_label *)
72145+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
72146+ return -ENOMEM;
72147+
72148+ if (copy_acl_object_label(o_tmp, userp))
72149+ return -EFAULT;
72150+
72151+ userp = o_tmp->prev;
72152+
72153+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
72154+ if (ret)
72155+ return ret;
72156+
72157+ insert_acl_obj_label(o_tmp, subj);
72158+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
72159+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
72160+ return -ENOMEM;
72161+
72162+ ret = copy_user_glob(o_tmp);
72163+ if (ret)
72164+ return ret;
72165+
72166+ if (o_tmp->nested) {
72167+ int already_copied;
72168+
72169+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
72170+ if (IS_ERR(o_tmp->nested))
72171+ return PTR_ERR(o_tmp->nested);
72172+
72173+ /* insert into nested subject list if we haven't copied this one yet
72174+ to prevent duplicate entries */
72175+ if (!already_copied) {
72176+ o_tmp->nested->next = role->hash->first;
72177+ role->hash->first = o_tmp->nested;
72178+ }
72179+ }
72180+ }
72181+
72182+ return 0;
72183+}
72184+
72185+static __u32
72186+count_user_subjs(struct acl_subject_label *userp)
72187+{
72188+ struct acl_subject_label s_tmp;
72189+ __u32 num = 0;
72190+
72191+ while (userp) {
72192+ if (copy_acl_subject_label(&s_tmp, userp))
72193+ break;
72194+
72195+ userp = s_tmp.prev;
72195+ num++; /* count the subject; mirrors count_user_objs() so the role's subject hash is sized to what it will hold */
72196+ }
72197+
72198+ return num;
72199+}
72200+
72201+static int
72202+copy_user_allowedips(struct acl_role_label *rolep)
72203+{
72204+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
72205+
72206+ ruserip = rolep->allowed_ips;
72207+
72208+ while (ruserip) {
72209+ rlast = rtmp;
72210+
72211+ if ((rtmp = (struct role_allowed_ip *)
72212+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
72213+ return -ENOMEM;
72214+
72215+ if (copy_role_allowed_ip(rtmp, ruserip))
72216+ return -EFAULT;
72217+
72218+ ruserip = rtmp->prev;
72219+
72220+ if (!rlast) {
72221+ rtmp->prev = NULL;
72222+ rolep->allowed_ips = rtmp;
72223+ } else {
72224+ rlast->next = rtmp;
72225+ rtmp->prev = rlast;
72226+ }
72227+
72228+ if (!ruserip)
72229+ rtmp->next = NULL;
72230+ }
72231+
72232+ return 0;
72233+}
72234+
72235+static int
72236+copy_user_transitions(struct acl_role_label *rolep)
72237+{
72238+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
72239+ int error;
72240+
72241+ rusertp = rolep->transitions;
72242+
72243+ while (rusertp) {
72244+ rlast = rtmp;
72245+
72246+ if ((rtmp = (struct role_transition *)
72247+ acl_alloc(sizeof (struct role_transition))) == NULL)
72248+ return -ENOMEM;
72249+
72250+ if (copy_role_transition(rtmp, rusertp))
72251+ return -EFAULT;
72252+
72253+ rusertp = rtmp->prev;
72254+
72255+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
72256+ if (error)
72257+ return error;
72258+
72259+ if (!rlast) {
72260+ rtmp->prev = NULL;
72261+ rolep->transitions = rtmp;
72262+ } else {
72263+ rlast->next = rtmp;
72264+ rtmp->prev = rlast;
72265+ }
72266+
72267+ if (!rusertp)
72268+ rtmp->next = NULL;
72269+ }
72270+
72271+ return 0;
72272+}
72273+
72274+static __u32 count_user_objs(const struct acl_object_label __user *userp)
72275+{
72276+ struct acl_object_label o_tmp;
72277+ __u32 num = 0;
72278+
72279+ while (userp) {
72280+ if (copy_acl_object_label(&o_tmp, userp))
72281+ break;
72282+
72283+ userp = o_tmp.prev;
72284+ num++;
72285+ }
72286+
72287+ return num;
72288+}
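
Both count_user_objs() and count_user_subjs() walk a singly linked list whose nodes still live in userspace: each step copies one node into a kernel-side temporary and follows the prev pointer from that snapshot, stopping on the first faulting copy. A userspace sketch of the traversal-by-copy idiom, with memcpy() standing in for copy_from_user() and all names illustrative:

#include <stdio.h>
#include <string.h>

struct node {
	struct node *prev;
	int payload;
};

/* stand-in for copy_from_user(): returns nonzero on failure */
static int copy_node(struct node *dst, const struct node *src)
{
	if (src == NULL)
		return 1;
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static unsigned int count_nodes(const struct node *userp)
{
	struct node tmp;
	unsigned int num = 0;

	while (userp) {
		if (copy_node(&tmp, userp))
			break;
		userp = tmp.prev; /* follow the pointer from the copied snapshot */
		num++;
	}
	return num;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 }, c = { &b, 3 };

	printf("%u\n", count_nodes(&c)); /* 3 */
	return 0;
}
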
72289+
72290+static struct acl_subject_label *
72291+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
72292+{
72293+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
72294+ __u32 num_objs;
72295+ struct acl_ip_label **i_tmp, *i_utmp2;
72296+ struct gr_hash_struct ghash;
72297+ struct subject_map *subjmap;
72298+ unsigned int i_num;
72299+ int err;
72300+
72301+ if (already_copied != NULL)
72302+ *already_copied = 0;
72303+
72304+ s_tmp = lookup_subject_map(userp);
72305+
72306+ /* we've already copied this subject into the kernel, just return
72307+ the reference to it, and don't copy it over again
72308+ */
72309+ if (s_tmp) {
72310+ if (already_copied != NULL)
72311+ *already_copied = 1;
72312+ return(s_tmp);
72313+ }
72314+
72315+ if ((s_tmp = (struct acl_subject_label *)
72316+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
72317+ return ERR_PTR(-ENOMEM);
72318+
72319+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
72320+ if (subjmap == NULL)
72321+ return ERR_PTR(-ENOMEM);
72322+
72323+ subjmap->user = userp;
72324+ subjmap->kernel = s_tmp;
72325+ insert_subj_map_entry(subjmap);
72326+
72327+ if (copy_acl_subject_label(s_tmp, userp))
72328+ return ERR_PTR(-EFAULT);
72329+
72330+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
72331+ if (err)
72332+ return ERR_PTR(err);
72333+
72334+ if (!strcmp(s_tmp->filename, "/"))
72335+ role->root_label = s_tmp;
72336+
72337+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
72338+ return ERR_PTR(-EFAULT);
72339+
72340+ /* copy user and group transition tables */
72341+
72342+ if (s_tmp->user_trans_num) {
72343+ uid_t *uidlist;
72344+
72345+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
72346+ if (uidlist == NULL)
72347+ return ERR_PTR(-ENOMEM);
72348+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
72349+ return ERR_PTR(-EFAULT);
72350+
72351+ s_tmp->user_transitions = uidlist;
72352+ }
72353+
72354+ if (s_tmp->group_trans_num) {
72355+ gid_t *gidlist;
72356+
72357+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
72358+ if (gidlist == NULL)
72359+ return ERR_PTR(-ENOMEM);
72360+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
72361+ return ERR_PTR(-EFAULT);
72362+
72363+ s_tmp->group_transitions = gidlist;
72364+ }
72365+
72366+ /* set up object hash table */
72367+ num_objs = count_user_objs(ghash.first);
72368+
72369+ s_tmp->obj_hash_size = num_objs;
72370+ s_tmp->obj_hash =
72371+ (struct acl_object_label **)
72372+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
72373+
72374+ if (!s_tmp->obj_hash)
72375+ return ERR_PTR(-ENOMEM);
72376+
72377+ memset(s_tmp->obj_hash, 0,
72378+ s_tmp->obj_hash_size *
72379+ sizeof (struct acl_object_label *));
72380+
72381+ /* add in objects */
72382+ err = copy_user_objs(ghash.first, s_tmp, role);
72383+
72384+ if (err)
72385+ return ERR_PTR(err);
72386+
72387+ /* set pointer for parent subject */
72388+ if (s_tmp->parent_subject) {
72389+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
72390+
72391+ if (IS_ERR(s_tmp2))
72392+ return s_tmp2;
72393+
72394+ s_tmp->parent_subject = s_tmp2;
72395+ }
72396+
72397+ /* add in ip acls */
72398+
72399+ if (!s_tmp->ip_num) {
72400+ s_tmp->ips = NULL;
72401+ goto insert;
72402+ }
72403+
72404+ i_tmp =
72405+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
72406+ sizeof (struct acl_ip_label *));
72407+
72408+ if (!i_tmp)
72409+ return ERR_PTR(-ENOMEM);
72410+
72411+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
72412+ *(i_tmp + i_num) =
72413+ (struct acl_ip_label *)
72414+ acl_alloc(sizeof (struct acl_ip_label));
72415+ if (!*(i_tmp + i_num))
72416+ return ERR_PTR(-ENOMEM);
72417+
72418+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
72419+ return ERR_PTR(-EFAULT);
72420+
72421+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
72422+ return ERR_PTR(-EFAULT);
72423+
72424+ if ((*(i_tmp + i_num))->iface == NULL)
72425+ continue;
72426+
72427+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
72428+ if (err)
72429+ return ERR_PTR(err);
72430+ }
72431+
72432+ s_tmp->ips = i_tmp;
72433+
72434+insert:
72435+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
72436+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
72437+ return ERR_PTR(-ENOMEM);
72438+
72439+ return s_tmp;
72440+}
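
Note the ordering in do_copy_user_subj(): the user->kernel pair goes into the subject map before any recursive copy (parent_subject, nested subjects), so shared or self-referential subjects resolve to the already-allocated kernel copy instead of being duplicated or recursing forever. A compact sketch of that memoized deep copy, using a toy linear memo table in place of the kernel's hash; every name is illustrative and allocation failure handling is elided:

#include <stdio.h>
#include <stdlib.h>

struct subj {
	struct subj *parent;
	int id;
};

/* toy memo table: parallel arrays instead of the kernel's subject-map hash */
#define MAX_MEMO 64
static const struct subj *memo_user[MAX_MEMO];
static struct subj *memo_kernel[MAX_MEMO];
static int memo_n;

static struct subj *lookup_memo(const struct subj *userp)
{
	for (int i = 0; i < memo_n; i++)
		if (memo_user[i] == userp)
			return memo_kernel[i];
	return NULL;
}

static struct subj *deep_copy(const struct subj *userp)
{
	struct subj *k = lookup_memo(userp);

	if (k)                     /* already copied: reuse, don't recurse */
		return k;

	k = malloc(sizeof(*k));    /* error handling elided */
	memo_user[memo_n] = userp; /* register BEFORE recursing, like insert_subj_map_entry() */
	memo_kernel[memo_n++] = k;

	k->id = userp->id;
	k->parent = userp->parent ? deep_copy(userp->parent) : NULL;
	return k;
}

int main(void)
{
	struct subj root = { NULL, 1 }, child1 = { &root, 2 }, child2 = { &root, 3 };
	struct subj *k1 = deep_copy(&child1), *k2 = deep_copy(&child2);

	/* both children share the single copied root */
	printf("%d\n", k1->parent == k2->parent); /* 1 */
	return 0;
}
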
72441+
72442+static int
72443+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
72444+{
72445+ struct acl_subject_label s_pre;
72446+ struct acl_subject_label * ret;
72447+ int err;
72448+
72449+ while (userp) {
72450+ if (copy_acl_subject_label(&s_pre, userp))
72451+ return -EFAULT;
72452+
72453+ ret = do_copy_user_subj(userp, role, NULL);
72454+
72455+ err = PTR_ERR(ret);
72456+ if (IS_ERR(ret))
72457+ return err;
72458+
72459+ insert_acl_subj_label(ret, role);
72460+
72461+ userp = s_pre.prev;
72462+ }
72463+
72464+ return 0;
72465+}
72466+
72467+static int
72468+copy_user_acl(struct gr_arg *arg)
72469+{
72470+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
72471+ struct acl_subject_label *subj_list;
72472+ struct sprole_pw *sptmp;
72473+ struct gr_hash_struct *ghash;
72474+ uid_t *domainlist;
72475+ unsigned int r_num;
72476+ int err = 0;
72477+ __u16 i;
72478+ __u32 num_subjs;
72479+
72480+ /* we need a default and kernel role */
72481+ if (arg->role_db.num_roles < 2)
72482+ return -EINVAL;
72483+
72484+ /* copy special role authentication info from userspace */
72485+
72486+ polstate->num_sprole_pws = arg->num_sprole_pws;
72487+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
72488+
72489+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
72490+ return -ENOMEM;
72491+
72492+ for (i = 0; i < polstate->num_sprole_pws; i++) {
72493+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
72494+ if (!sptmp)
72495+ return -ENOMEM;
72496+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
72497+ return -EFAULT;
72498+
72499+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
72500+ if (err)
72501+ return err;
72502+
72503+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72504+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
72505+#endif
72506+
72507+ polstate->acl_special_roles[i] = sptmp;
72508+ }
72509+
72510+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
72511+
72512+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
72513+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
72514+
72515+ if (!r_tmp)
72516+ return -ENOMEM;
72517+
72518+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
72519+ return -EFAULT;
72520+
72521+ if (copy_acl_role_label(r_tmp, r_utmp2))
72522+ return -EFAULT;
72523+
72524+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
72525+ if (err)
72526+ return err;
72527+
72528+ if (!strcmp(r_tmp->rolename, "default")
72529+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
72530+ polstate->default_role = r_tmp;
72531+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
72532+ polstate->kernel_role = r_tmp;
72533+ }
72534+
72535+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
72536+ return -ENOMEM;
72537+
72538+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
72539+ return -EFAULT;
72540+
72541+ r_tmp->hash = ghash;
72542+
72543+ num_subjs = count_user_subjs(r_tmp->hash->first);
72544+
72545+ r_tmp->subj_hash_size = num_subjs;
72546+ r_tmp->subj_hash =
72547+ (struct acl_subject_label **)
72548+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
72549+
72550+ if (!r_tmp->subj_hash)
72551+ return -ENOMEM;
72552+
72553+ err = copy_user_allowedips(r_tmp);
72554+ if (err)
72555+ return err;
72556+
72557+ /* copy domain info */
72558+ if (r_tmp->domain_children != NULL) {
72559+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
72560+ if (domainlist == NULL)
72561+ return -ENOMEM;
72562+
72563+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
72564+ return -EFAULT;
72565+
72566+ r_tmp->domain_children = domainlist;
72567+ }
72568+
72569+ err = copy_user_transitions(r_tmp);
72570+ if (err)
72571+ return err;
72572+
72573+ memset(r_tmp->subj_hash, 0,
72574+ r_tmp->subj_hash_size *
72575+ sizeof (struct acl_subject_label *));
72576+
72577+ /* acquire the list of subjects, then NULL out
72578+ the list prior to parsing the subjects for this role,
72579+ as during this parsing the list is replaced with a list
72580+ of *nested* subjects for the role
72581+ */
72582+ subj_list = r_tmp->hash->first;
72583+
72584+ /* set nested subject list to null */
72585+ r_tmp->hash->first = NULL;
72586+
72587+ err = copy_user_subjs(subj_list, r_tmp);
72588+
72589+ if (err)
72590+ return err;
72591+
72592+ insert_acl_role_label(r_tmp);
72593+ }
72594+
72595+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
72596+ return -EINVAL;
72597+
72598+ return err;
72599+}
72600+
72601+static int gracl_reload_apply_policies(void *reload)
72602+{
72603+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
72604+ struct task_struct *task, *task2;
72605+ struct acl_role_label *role, *rtmp;
72606+ struct acl_subject_label *subj;
72607+ const struct cred *cred;
72608+ int role_applied;
72609+ int ret = 0;
72610+
72611+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
72612+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
72613+
72614+ /* first make sure we'll be able to apply the new policy cleanly */
72615+ do_each_thread(task2, task) {
72616+ if (task->exec_file == NULL)
72617+ continue;
72618+ role_applied = 0;
72619+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
72620+ /* preserve special roles */
72621+ FOR_EACH_ROLE_START(role)
72622+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
72623+ rtmp = task->role;
72624+ task->role = role;
72625+ role_applied = 1;
72626+ break;
72627+ }
72628+ FOR_EACH_ROLE_END(role)
72629+ }
72630+ if (!role_applied) {
72631+ cred = __task_cred(task);
72632+ rtmp = task->role;
72633+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
72634+ }
72635+ /* this handles non-nested inherited subjects; nested subjects will still
72636+ be dropped currently */
72637+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
72638+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
72639+ /* change the role back so that we've made no modifications to the policy */
72640+ task->role = rtmp;
72641+
72642+ if (subj == NULL || task->tmpacl == NULL) {
72643+ ret = -EINVAL;
72644+ goto out;
72645+ }
72646+ } while_each_thread(task2, task);
72647+
72648+ /* now actually apply the policy */
72649+
72650+ do_each_thread(task2, task) {
72651+ if (task->exec_file) {
72652+ role_applied = 0;
72653+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
72654+ /* preserve special roles */
72655+ FOR_EACH_ROLE_START(role)
72656+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
72657+ task->role = role;
72658+ role_applied = 1;
72659+ break;
72660+ }
72661+ FOR_EACH_ROLE_END(role)
72662+ }
72663+ if (!role_applied) {
72664+ cred = __task_cred(task);
72665+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
72666+ }
72667+ /* this handles non-nested inherited subjects; nested subjects will still
72668+ be dropped currently */
72669+ if (!reload_state->oldmode && task->inherited)
72670+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
72671+ else {
72672+ /* looked up and tagged to the task previously */
72673+ subj = task->tmpacl;
72674+ }
72675+ /* subj will be non-null */
72676+ __gr_apply_subject_to_task(polstate, task, subj);
72677+ if (reload_state->oldmode) {
72678+ task->acl_role_id = 0;
72679+ task->acl_sp_role = 0;
72680+ task->inherited = 0;
72681+ }
72682+ } else {
72683+ // it's a kernel process
72684+ task->role = polstate->kernel_role;
72685+ task->acl = polstate->kernel_role->root_label;
72686+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
72687+ task->acl->mode &= ~GR_PROCFIND;
72688+#endif
72689+ }
72690+ } while_each_thread(task2, task);
72691+
72692+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
72693+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
72694+
72695+out:
72696+
72697+ return ret;
72698+}
72699+
72700+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
72701+{
72702+ struct gr_reload_state new_reload_state = { };
72703+ int err;
72704+
72705+ new_reload_state.oldpolicy_ptr = polstate;
72706+ new_reload_state.oldalloc_ptr = current_alloc_state;
72707+ new_reload_state.oldmode = oldmode;
72708+
72709+ current_alloc_state = &new_reload_state.newalloc;
72710+ polstate = &new_reload_state.newpolicy;
72711+
72712+ /* everything relevant is now saved off, copy in the new policy */
72713+ if (init_variables(args, true)) {
72714+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
72715+ err = -ENOMEM;
72716+ goto error;
72717+ }
72718+
72719+ err = copy_user_acl(args);
72720+ free_init_variables();
72721+ if (err)
72722+ goto error;
72723+ /* the new policy is copied in, with the old policy available via saved_state.
72724+ first go through applying roles, making sure to preserve special roles;
72725+ then apply new subjects, making sure to preserve inherited and nested subjects,
72726+ though currently only inherited subjects will be preserved
72727+ */
72728+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
72729+ if (err)
72730+ goto error;
72731+
72732+ /* we've now applied the new policy, so restore the old policy state to free it */
72733+ polstate = &new_reload_state.oldpolicy;
72734+ current_alloc_state = &new_reload_state.oldalloc;
72735+ free_variables(true);
72736+
72737+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
72738+ to running_polstate/current_alloc_state inside stop_machine
72739+ */
72740+ err = 0;
72741+ goto out;
72742+error:
72743+ /* if loading the new policy fails, we'll just keep the previous
72744+ policy set around
72745+ */
72746+ free_variables(true);
72747+
72748+ /* doesn't affect runtime, but maintains consistent state */
72749+out:
72750+ polstate = new_reload_state.oldpolicy_ptr;
72751+ current_alloc_state = new_reload_state.oldalloc_ptr;
72752+
72753+ return err;
72754+}
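
gracl_reload() follows a save/build/commit/restore discipline: the live polstate and allocator pointers are saved, pointed at staging state while the new policy is built, committed inside stop_machine(), and finally pointed back at whatever state must be freed. A userspace sketch of the same pointer discipline; struct policy, build() and the memcpy commit are stand-ins for the real state and the stop_machine() step:

#include <stdio.h>
#include <string.h>

struct policy { int version; };

static struct policy live_policy;              /* what the system runs on */
static struct policy *polstate = &live_policy; /* everything goes through this pointer */

static int build(struct policy *p, int version)
{
	p->version = version;
	return version < 0 ? -1 : 0; /* pretend negative versions fail to parse */
}

static int reload(int version)
{
	struct policy newpolicy = { 0 };
	struct policy *old = polstate;   /* save everything relevant */
	int err;

	polstate = &newpolicy;           /* build the new state off to the side */
	err = build(polstate, version);
	if (err) {
		polstate = old;          /* on error, the previous policy stays live */
		return err;
	}

	/* the "stop_machine" moment: publish atomically from the callers' view */
	memcpy(old, polstate, sizeof(*old));
	polstate = old;                  /* live pointer again refers to committed state */
	return 0;
}

int main(void)
{
	build(polstate, 1);
	printf("reload ok=%d version=%d\n", reload(2) == 0, polstate->version);  /* 1, 2 */
	printf("reload ok=%d version=%d\n", reload(-5) == 0, polstate->version); /* 0, 2 */
	return 0;
}
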
72755+
72756+static int
72757+gracl_init(struct gr_arg *args)
72758+{
72759+ int error = 0;
72760+
72761+ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
72762+ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
72763+
72764+ if (init_variables(args, false)) {
72765+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
72766+ error = -ENOMEM;
72767+ goto out;
72768+ }
72769+
72770+ error = copy_user_acl(args);
72771+ free_init_variables();
72772+ if (error)
72773+ goto out;
72774+
72775+ error = gr_set_acls(0);
72776+ if (error)
72777+ goto out;
72778+
72779+ gr_enable_rbac_system();
72780+
72781+ return 0;
72782+
72783+out:
72784+ free_variables(false);
72785+ return error;
72786+}
72787+
72788+static int
72789+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
72790+ unsigned char **sum)
72791+{
72792+ struct acl_role_label *r;
72793+ struct role_allowed_ip *ipp;
72794+ struct role_transition *trans;
72795+ unsigned int i;
72796+ int found = 0;
72797+ u32 curr_ip = current->signal->curr_ip;
72798+
72799+ current->signal->saved_ip = curr_ip;
72800+
72801+ /* check transition table */
72802+
72803+ for (trans = current->role->transitions; trans; trans = trans->next) {
72804+ if (!strcmp(rolename, trans->rolename)) {
72805+ found = 1;
72806+ break;
72807+ }
72808+ }
72809+
72810+ if (!found)
72811+ return 0;
72812+
72813+ /* handle special roles that do not require authentication
72814+ and check the allowed source IPs */
72815+
72816+ FOR_EACH_ROLE_START(r)
72817+ if (!strcmp(rolename, r->rolename) &&
72818+ (r->roletype & GR_ROLE_SPECIAL)) {
72819+ found = 0;
72820+ if (r->allowed_ips != NULL) {
72821+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
72822+ if ((ntohl(curr_ip) & ipp->netmask) ==
72823+ (ntohl(ipp->addr) & ipp->netmask))
72824+ found = 1;
72825+ }
72826+ } else
72827+ found = 2;
72828+ if (!found)
72829+ return 0;
72830+
72831+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
72832+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
72833+ *salt = NULL;
72834+ *sum = NULL;
72835+ return 1;
72836+ }
72837+ }
72838+ FOR_EACH_ROLE_END(r)
72839+
72840+ for (i = 0; i < polstate->num_sprole_pws; i++) {
72841+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
72842+ *salt = polstate->acl_special_roles[i]->salt;
72843+ *sum = polstate->acl_special_roles[i]->sum;
72844+ return 1;
72845+ }
72846+ }
72847+
72848+ return 0;
72849+}
72850+
72851+int gr_check_secure_terminal(struct task_struct *task)
72852+{
72853+ struct task_struct *p, *p2, *p3;
72854+ struct files_struct *files;
72855+ struct fdtable *fdt;
72856+ struct file *our_file = NULL, *file;
72857+ int i;
72858+
72859+ if (task->signal->tty == NULL)
72860+ return 1;
72861+
72862+ files = get_files_struct(task);
72863+ if (files != NULL) {
72864+ rcu_read_lock();
72865+ fdt = files_fdtable(files);
72866+ for (i=0; i < fdt->max_fds; i++) {
72867+ file = fcheck_files(files, i);
72868+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
72869+ get_file(file);
72870+ our_file = file;
72871+ }
72872+ }
72873+ rcu_read_unlock();
72874+ put_files_struct(files);
72875+ }
72876+
72877+ if (our_file == NULL)
72878+ return 1;
72879+
72880+ read_lock(&tasklist_lock);
72881+ do_each_thread(p2, p) {
72882+ files = get_files_struct(p);
72883+ if (files == NULL ||
72884+ (p->signal && p->signal->tty == task->signal->tty)) {
72885+ if (files != NULL)
72886+ put_files_struct(files);
72887+ continue;
72888+ }
72889+ rcu_read_lock();
72890+ fdt = files_fdtable(files);
72891+ for (i=0; i < fdt->max_fds; i++) {
72892+ file = fcheck_files(files, i);
72893+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
72894+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
72895+ p3 = task;
72896+ while (task_pid_nr(p3) > 0) {
72897+ if (p3 == p)
72898+ break;
72899+ p3 = p3->real_parent;
72900+ }
72901+ if (p3 == p)
72902+ break;
72903+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
72904+ gr_handle_alertkill(p);
72905+ rcu_read_unlock();
72906+ put_files_struct(files);
72907+ read_unlock(&tasklist_lock);
72908+ fput(our_file);
72909+ return 0;
72910+ }
72911+ }
72912+ rcu_read_unlock();
72913+ put_files_struct(files);
72914+ } while_each_thread(p2, p);
72915+ read_unlock(&tasklist_lock);
72916+
72917+ fput(our_file);
72918+ return 1;
72919+}
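
/*
 * Editor's note: the tty-sniffing check above walks p3 = p3->real_parent
 * until the swapper (pid 0) to exempt the task's own ancestors -- typically
 * the shell that spawned it -- from being flagged.  That walk in isolation,
 * over a toy task struct (hypothetical types, not from this patch):
 */
#include <stdbool.h>

struct toy_task {
	int pid;
	struct toy_task *real_parent;
};

/* true if "candidate" appears on task's chain of real parents */
static bool is_ancestor(const struct toy_task *task,
			const struct toy_task *candidate)
{
	const struct toy_task *p = task;

	while (p->pid > 0) {                  /* pid 0 terminates the chain */
		if (p == candidate)
			return true;
		p = p->real_parent;
	}
	return false;
}
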
72920+
72921+ssize_t
72922+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
72923+{
72924+ struct gr_arg_wrapper uwrap;
72925+ unsigned char *sprole_salt = NULL;
72926+ unsigned char *sprole_sum = NULL;
72927+ int error = 0;
72928+ int error2 = 0;
72929+ size_t req_count = 0;
72930+ unsigned char oldmode = 0;
72931+
72932+ mutex_lock(&gr_dev_mutex);
72933+
72934+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
72935+ error = -EPERM;
72936+ goto out;
72937+ }
72938+
72939+#ifdef CONFIG_COMPAT
72940+ pax_open_kernel();
72941+ if (is_compat_task()) {
72942+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
72943+ copy_gr_arg = &copy_gr_arg_compat;
72944+ copy_acl_object_label = &copy_acl_object_label_compat;
72945+ copy_acl_subject_label = &copy_acl_subject_label_compat;
72946+ copy_acl_role_label = &copy_acl_role_label_compat;
72947+ copy_acl_ip_label = &copy_acl_ip_label_compat;
72948+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
72949+ copy_role_transition = &copy_role_transition_compat;
72950+ copy_sprole_pw = &copy_sprole_pw_compat;
72951+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
72952+ copy_pointer_from_array = &copy_pointer_from_array_compat;
72953+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
72954+ } else {
72955+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
72956+ copy_gr_arg = &copy_gr_arg_normal;
72957+ copy_acl_object_label = &copy_acl_object_label_normal;
72958+ copy_acl_subject_label = &copy_acl_subject_label_normal;
72959+ copy_acl_role_label = &copy_acl_role_label_normal;
72960+ copy_acl_ip_label = &copy_acl_ip_label_normal;
72961+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
72962+ copy_role_transition = &copy_role_transition_normal;
72963+ copy_sprole_pw = &copy_sprole_pw_normal;
72964+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
72965+ copy_pointer_from_array = &copy_pointer_from_array_normal;
72966+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
72967+ }
72968+ pax_close_kernel();
72969+#endif
72970+
72971+ req_count = get_gr_arg_wrapper_size();
72972+
72973+ if (count != req_count) {
72974+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
72975+ error = -EINVAL;
72976+ goto out;
72977+ }
72978+
72979+
72980+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
72981+ gr_auth_expires = 0;
72982+ gr_auth_attempts = 0;
72983+ }
72984+
72985+ error = copy_gr_arg_wrapper(buf, &uwrap);
72986+ if (error)
72987+ goto out;
72988+
72989+ error = copy_gr_arg(uwrap.arg, &gr_usermode);
72990+ if (error)
72991+ goto out;
72992+
72993+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
72994+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
72995+ time_after(gr_auth_expires, get_seconds())) {
72996+ error = -EBUSY;
72997+ goto out;
72998+ }
72999+
73000+	/* if a non-root user is trying to do anything other than use a
73001+	   special role, do not attempt authentication and do not count
73002+	   the attempt toward the authentication lockout
73003+	*/
73004+
73005+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
73006+ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
73007+ gr_is_global_nonroot(current_uid())) {
73008+ error = -EPERM;
73009+ goto out;
73010+ }
73011+
73012+ /* ensure pw and special role name are null terminated */
73013+
73014+ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
73015+ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
73016+
73017+	/* Okay.
73018+	 * We now have enough of the argument structure (we have yet
73019+	 * to copy_from_user the tables themselves).  Copy the tables
73020+	 * only if we need them, i.e. for loading operations. */
73021+
73022+ switch (gr_usermode.mode) {
73023+ case GR_STATUS:
73024+ if (gr_acl_is_enabled()) {
73025+ error = 1;
73026+ if (!gr_check_secure_terminal(current))
73027+ error = 3;
73028+ } else
73029+ error = 2;
73030+ goto out;
73031+ case GR_SHUTDOWN:
73032+ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
73033+ stop_machine(gr_rbac_disable, NULL, NULL);
73034+ free_variables(false);
73035+ memset(&gr_usermode, 0, sizeof(gr_usermode));
73036+ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
73037+ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
73038+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
73039+ } else if (gr_acl_is_enabled()) {
73040+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
73041+ error = -EPERM;
73042+ } else {
73043+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
73044+ error = -EAGAIN;
73045+ }
73046+ break;
73047+ case GR_ENABLE:
73048+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
73049+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
73050+ else {
73051+ if (gr_acl_is_enabled())
73052+ error = -EAGAIN;
73053+ else
73054+ error = error2;
73055+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
73056+ }
73057+ break;
73058+ case GR_OLDRELOAD:
73059+		oldmode = 1;	/* fall through to GR_RELOAD */
73060+ case GR_RELOAD:
73061+ if (!gr_acl_is_enabled()) {
73062+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
73063+ error = -EAGAIN;
73064+ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
73065+ error2 = gracl_reload(&gr_usermode, oldmode);
73066+ if (!error2)
73067+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
73068+ else {
73069+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
73070+ error = error2;
73071+ }
73072+ } else {
73073+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
73074+ error = -EPERM;
73075+ }
73076+ break;
73077+ case GR_SEGVMOD:
73078+ if (unlikely(!gr_acl_is_enabled())) {
73079+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
73080+ error = -EAGAIN;
73081+ break;
73082+ }
73083+
73084+ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
73085+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
73086+ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
73087+ struct acl_subject_label *segvacl;
73088+ segvacl =
73089+ lookup_acl_subj_label(gr_usermode.segv_inode,
73090+ gr_usermode.segv_device,
73091+ current->role);
73092+ if (segvacl) {
73093+ segvacl->crashes = 0;
73094+ segvacl->expires = 0;
73095+ }
73096+ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
73097+ gr_remove_uid(gr_usermode.segv_uid);
73098+ }
73099+ } else {
73100+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
73101+ error = -EPERM;
73102+ }
73103+ break;
73104+ case GR_SPROLE:
73105+ case GR_SPROLEPAM:
73106+ if (unlikely(!gr_acl_is_enabled())) {
73107+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
73108+ error = -EAGAIN;
73109+ break;
73110+ }
73111+
73112+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
73113+ current->role->expires = 0;
73114+ current->role->auth_attempts = 0;
73115+ }
73116+
73117+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
73118+ time_after(current->role->expires, get_seconds())) {
73119+ error = -EBUSY;
73120+ goto out;
73121+ }
73122+
73123+ if (lookup_special_role_auth
73124+ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
73125+ && ((!sprole_salt && !sprole_sum)
73126+ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
73127+ char *p = "";
73128+ assign_special_role(gr_usermode.sp_role);
73129+ read_lock(&tasklist_lock);
73130+ if (current->real_parent)
73131+ p = current->real_parent->role->rolename;
73132+ read_unlock(&tasklist_lock);
73133+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
73134+ p, acl_sp_role_value);
73135+ } else {
73136+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
73137+ error = -EPERM;
73138+ if(!(current->role->auth_attempts++))
73139+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
73140+
73141+ goto out;
73142+ }
73143+ break;
73144+ case GR_UNSPROLE:
73145+ if (unlikely(!gr_acl_is_enabled())) {
73146+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
73147+ error = -EAGAIN;
73148+ break;
73149+ }
73150+
73151+ if (current->role->roletype & GR_ROLE_SPECIAL) {
73152+ char *p = "";
73153+ int i = 0;
73154+
73155+ read_lock(&tasklist_lock);
73156+ if (current->real_parent) {
73157+ p = current->real_parent->role->rolename;
73158+ i = current->real_parent->acl_role_id;
73159+ }
73160+ read_unlock(&tasklist_lock);
73161+
73162+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
73163+ gr_set_acls(1);
73164+ } else {
73165+ error = -EPERM;
73166+ goto out;
73167+ }
73168+ break;
73169+ default:
73170+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
73171+ error = -EINVAL;
73172+ break;
73173+ }
73174+
73175+ if (error != -EPERM)
73176+ goto out;
73177+
73178+ if(!(gr_auth_attempts++))
73179+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
73180+
73181+ out:
73182+ mutex_unlock(&gr_dev_mutex);
73183+
73184+ if (!error)
73185+ error = req_count;
73186+
73187+ return error;
73188+}
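
/*
 * Editor's note: the lockout bookkeeping in write_grsec_handler() arms a
 * timeout on the *first* failed attempt (the post-increment test near the
 * out: label) and refuses further attempts once MAXTRIES failures land
 * inside the window.  A userspace sketch with hypothetical names; time()
 * stands in for get_seconds():
 */
#include <stdbool.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30	/* seconds, like CONFIG_GRKERNSEC_ACL_TIMEOUT */

static unsigned int auth_attempts;
static time_t auth_expires;

static bool auth_locked_out(void)
{
	time_t now = time(NULL);

	if (auth_expires && now >= auth_expires) {  /* window elapsed: reset */
		auth_expires = 0;
		auth_attempts = 0;
	}
	return auth_attempts >= MAXTRIES && now < auth_expires;
}

static void auth_record_failure(void)
{
	if (!(auth_attempts++))			     /* first failure arms... */
		auth_expires = time(NULL) + TIMEOUT; /* ...the lockout window */
}
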
73189+
73190+int
73191+gr_set_acls(const int type)
73192+{
73193+ struct task_struct *task, *task2;
73194+ struct acl_role_label *role = current->role;
73195+ struct acl_subject_label *subj;
73196+ __u16 acl_role_id = current->acl_role_id;
73197+ const struct cred *cred;
73198+ int ret;
73199+
73200+ rcu_read_lock();
73201+ read_lock(&tasklist_lock);
73202+ read_lock(&grsec_exec_file_lock);
73203+ do_each_thread(task2, task) {
73204+		/* check to see if we're called from the exit handler;
73205+		   if so, only replace ACLs that have inherited the admin
73206+		   ACL */
73207+
73208+ if (type && (task->role != role ||
73209+ task->acl_role_id != acl_role_id))
73210+ continue;
73211+
73212+ task->acl_role_id = 0;
73213+ task->acl_sp_role = 0;
73214+ task->inherited = 0;
73215+
73216+ if (task->exec_file) {
73217+ cred = __task_cred(task);
73218+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
73219+ subj = __gr_get_subject_for_task(polstate, task, NULL);
73220+ if (subj == NULL) {
73221+ ret = -EINVAL;
73222+ read_unlock(&grsec_exec_file_lock);
73223+ read_unlock(&tasklist_lock);
73224+ rcu_read_unlock();
73225+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
73226+ return ret;
73227+ }
73228+ __gr_apply_subject_to_task(polstate, task, subj);
73229+ } else {
73230+ // it's a kernel process
73231+ task->role = polstate->kernel_role;
73232+ task->acl = polstate->kernel_role->root_label;
73233+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
73234+ task->acl->mode &= ~GR_PROCFIND;
73235+#endif
73236+ }
73237+ } while_each_thread(task2, task);
73238+ read_unlock(&grsec_exec_file_lock);
73239+ read_unlock(&tasklist_lock);
73240+ rcu_read_unlock();
73241+
73242+ return 0;
73243+}
73244diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
73245new file mode 100644
73246index 0000000..39645c9
73247--- /dev/null
73248+++ b/grsecurity/gracl_res.c
73249@@ -0,0 +1,68 @@
73250+#include <linux/kernel.h>
73251+#include <linux/sched.h>
73252+#include <linux/gracl.h>
73253+#include <linux/grinternal.h>
73254+
73255+static const char *restab_log[] = {
73256+ [RLIMIT_CPU] = "RLIMIT_CPU",
73257+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
73258+ [RLIMIT_DATA] = "RLIMIT_DATA",
73259+ [RLIMIT_STACK] = "RLIMIT_STACK",
73260+ [RLIMIT_CORE] = "RLIMIT_CORE",
73261+ [RLIMIT_RSS] = "RLIMIT_RSS",
73262+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
73263+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
73264+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
73265+ [RLIMIT_AS] = "RLIMIT_AS",
73266+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
73267+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
73268+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
73269+ [RLIMIT_NICE] = "RLIMIT_NICE",
73270+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
73271+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
73272+ [GR_CRASH_RES] = "RLIMIT_CRASH"
73273+};
73274+
73275+void
73276+gr_log_resource(const struct task_struct *task,
73277+ const int res, const unsigned long wanted, const int gt)
73278+{
73279+ const struct cred *cred;
73280+ unsigned long rlim;
73281+
73282+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
73283+ return;
73284+
73285+ // not yet supported resource
73286+ if (unlikely(!restab_log[res]))
73287+ return;
73288+
73289+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
73290+ rlim = task_rlimit_max(task, res);
73291+ else
73292+ rlim = task_rlimit(task, res);
73293+
73294+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
73295+ return;
73296+
73297+ rcu_read_lock();
73298+ cred = __task_cred(task);
73299+
73300+ if (res == RLIMIT_NPROC &&
73301+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
73302+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
73303+ goto out_rcu_unlock;
73304+ else if (res == RLIMIT_MEMLOCK &&
73305+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
73306+ goto out_rcu_unlock;
73307+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
73308+ goto out_rcu_unlock;
73309+ rcu_read_unlock();
73310+
73311+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
73312+
73313+ return;
73314+out_rcu_unlock:
73315+ rcu_read_unlock();
73316+ return;
73317+}
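
/*
 * Editor's note: gr_log_resource() takes a "gt" flag selecting the
 * comparison -- callers testing "wanted > limit" pass gt=1 (so wanted <= rlim
 * is fine), callers testing "wanted >= limit" pass gt=0 (so only
 * wanted < rlim is fine), and RLIM_INFINITY never logs.  The predicate in
 * isolation (MY_RLIM_INFINITY is a hypothetical stand-in):
 */
#include <limits.h>
#include <stdbool.h>

#define MY_RLIM_INFINITY ULONG_MAX

static bool over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == MY_RLIM_INFINITY)
		return false;                 /* unlimited: never log */
	return gt ? wanted > rlim : wanted >= rlim;
}
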
73318diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
73319new file mode 100644
73320index 0000000..2040e61
73321--- /dev/null
73322+++ b/grsecurity/gracl_segv.c
73323@@ -0,0 +1,313 @@
73324+#include <linux/kernel.h>
73325+#include <linux/mm.h>
73326+#include <asm/uaccess.h>
73327+#include <asm/errno.h>
73328+#include <asm/mman.h>
73329+#include <net/sock.h>
73330+#include <linux/file.h>
73331+#include <linux/fs.h>
73332+#include <linux/net.h>
73333+#include <linux/in.h>
73334+#include <linux/slab.h>
73335+#include <linux/types.h>
73336+#include <linux/sched.h>
73337+#include <linux/timer.h>
73338+#include <linux/gracl.h>
73339+#include <linux/grsecurity.h>
73340+#include <linux/grinternal.h>
73341+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
73342+#include <linux/magic.h>
73343+#include <linux/pagemap.h>
73344+#include "../fs/btrfs/async-thread.h"
73345+#include "../fs/btrfs/ctree.h"
73346+#include "../fs/btrfs/btrfs_inode.h"
73347+#endif
73348+
73349+static struct crash_uid *uid_set;
73350+static unsigned short uid_used;
73351+static DEFINE_SPINLOCK(gr_uid_lock);
73352+extern rwlock_t gr_inode_lock;
73353+extern struct acl_subject_label *
73354+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
73355+ struct acl_role_label *role);
73356+
73357+static inline dev_t __get_dev(const struct dentry *dentry)
73358+{
73359+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
73360+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
73361+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
73362+ else
73363+#endif
73364+ return dentry->d_sb->s_dev;
73365+}
73366+
73367+int
73368+gr_init_uidset(void)
73369+{
73370+ uid_set =
73371+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
73372+ uid_used = 0;
73373+
73374+ return uid_set ? 1 : 0;
73375+}
73376+
73377+void
73378+gr_free_uidset(void)
73379+{
73380+ if (uid_set) {
73381+ struct crash_uid *tmpset;
73382+ spin_lock(&gr_uid_lock);
73383+ tmpset = uid_set;
73384+ uid_set = NULL;
73385+ uid_used = 0;
73386+ spin_unlock(&gr_uid_lock);
73387+ if (tmpset)
73388+ kfree(tmpset);
73389+ }
73390+
73391+ return;
73392+}
73393+
73394+int
73395+gr_find_uid(const uid_t uid)
73396+{
73397+ struct crash_uid *tmp = uid_set;
73398+ uid_t buid;
73399+ int low = 0, high = uid_used - 1, mid;
73400+
73401+ while (high >= low) {
73402+ mid = (low + high) >> 1;
73403+ buid = tmp[mid].uid;
73404+ if (buid == uid)
73405+ return mid;
73406+ if (buid > uid)
73407+ high = mid - 1;
73408+ if (buid < uid)
73409+ low = mid + 1;
73410+ }
73411+
73412+ return -1;
73413+}
73414+
73415+static __inline__ void
73416+gr_insertsort(void)
73417+{
73418+ unsigned short i, j;
73419+ struct crash_uid index;
73420+
73421+ for (i = 1; i < uid_used; i++) {
73422+ index = uid_set[i];
73423+ j = i;
73424+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
73425+ uid_set[j] = uid_set[j - 1];
73426+ j--;
73427+ }
73428+ uid_set[j] = index;
73429+ }
73430+
73431+ return;
73432+}
73433+
73434+static __inline__ void
73435+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
73436+{
73437+ int loc;
73438+ uid_t uid = GR_GLOBAL_UID(kuid);
73439+
73440+ if (uid_used == GR_UIDTABLE_MAX)
73441+ return;
73442+
73443+ loc = gr_find_uid(uid);
73444+
73445+ if (loc >= 0) {
73446+ uid_set[loc].expires = expires;
73447+ return;
73448+ }
73449+
73450+ uid_set[uid_used].uid = uid;
73451+ uid_set[uid_used].expires = expires;
73452+ uid_used++;
73453+
73454+ gr_insertsort();
73455+
73456+ return;
73457+}
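
/*
 * Editor's note: the crash-uid table above is kept sorted so gr_find_uid()
 * can binary-search it; gr_insert_uid() appends and restores order with an
 * insertion sort, which is cheap for a small, mostly-sorted array.  A
 * self-contained sketch of the same find/insert routine over bare uids
 * (the insert below is the single-element variant of gr_insertsort()):
 */
#define TAB_MAX 64

static unsigned int tab[TAB_MAX];
static int tab_used;

static int tab_find(unsigned int uid)        /* mirrors gr_find_uid() */
{
	int low = 0, high = tab_used - 1;

	while (high >= low) {
		int mid = low + (high - low) / 2;

		if (tab[mid] == uid)
			return mid;
		if (tab[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void tab_insert(unsigned int uid)     /* mirrors gr_insert_uid() */
{
	int j;

	if (tab_used == TAB_MAX || tab_find(uid) >= 0)
		return;
	/* append, then shift the new entry left into sorted position */
	for (j = tab_used++; j > 0 && tab[j - 1] > uid; j--)
		tab[j] = tab[j - 1];
	tab[j] = uid;
}
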
73458+
73459+void
73460+gr_remove_uid(const unsigned short loc)
73461+{
73462+ unsigned short i;
73463+
73464+ for (i = loc + 1; i < uid_used; i++)
73465+ uid_set[i - 1] = uid_set[i];
73466+
73467+ uid_used--;
73468+
73469+ return;
73470+}
73471+
73472+int
73473+gr_check_crash_uid(const kuid_t kuid)
73474+{
73475+ int loc;
73476+ int ret = 0;
73477+ uid_t uid;
73478+
73479+ if (unlikely(!gr_acl_is_enabled()))
73480+ return 0;
73481+
73482+ uid = GR_GLOBAL_UID(kuid);
73483+
73484+ spin_lock(&gr_uid_lock);
73485+ loc = gr_find_uid(uid);
73486+
73487+ if (loc < 0)
73488+ goto out_unlock;
73489+
73490+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
73491+ gr_remove_uid(loc);
73492+ else
73493+ ret = 1;
73494+
73495+out_unlock:
73496+ spin_unlock(&gr_uid_lock);
73497+ return ret;
73498+}
73499+
73500+static __inline__ int
73501+proc_is_setxid(const struct cred *cred)
73502+{
73503+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
73504+ !uid_eq(cred->uid, cred->fsuid))
73505+ return 1;
73506+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
73507+ !gid_eq(cred->gid, cred->fsgid))
73508+ return 1;
73509+
73510+ return 0;
73511+}
73512+
73513+extern int gr_fake_force_sig(int sig, struct task_struct *t);
73514+
73515+void
73516+gr_handle_crash(struct task_struct *task, const int sig)
73517+{
73518+ struct acl_subject_label *curr;
73519+ struct task_struct *tsk, *tsk2;
73520+ const struct cred *cred;
73521+ const struct cred *cred2;
73522+
73523+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
73524+ return;
73525+
73526+ if (unlikely(!gr_acl_is_enabled()))
73527+ return;
73528+
73529+ curr = task->acl;
73530+
73531+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
73532+ return;
73533+
73534+ if (time_before_eq(curr->expires, get_seconds())) {
73535+ curr->expires = 0;
73536+ curr->crashes = 0;
73537+ }
73538+
73539+ curr->crashes++;
73540+
73541+ if (!curr->expires)
73542+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
73543+
73544+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
73545+ time_after(curr->expires, get_seconds())) {
73546+ rcu_read_lock();
73547+ cred = __task_cred(task);
73548+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
73549+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
73550+ spin_lock(&gr_uid_lock);
73551+ gr_insert_uid(cred->uid, curr->expires);
73552+ spin_unlock(&gr_uid_lock);
73553+ curr->expires = 0;
73554+ curr->crashes = 0;
73555+ read_lock(&tasklist_lock);
73556+ do_each_thread(tsk2, tsk) {
73557+ cred2 = __task_cred(tsk);
73558+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
73559+ gr_fake_force_sig(SIGKILL, tsk);
73560+ } while_each_thread(tsk2, tsk);
73561+ read_unlock(&tasklist_lock);
73562+ } else {
73563+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
73564+ read_lock(&tasklist_lock);
73565+ read_lock(&grsec_exec_file_lock);
73566+ do_each_thread(tsk2, tsk) {
73567+ if (likely(tsk != task)) {
73568+ // if this thread has the same subject as the one that triggered
73569+ // RES_CRASH and it's the same binary, kill it
73570+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
73571+ gr_fake_force_sig(SIGKILL, tsk);
73572+ }
73573+ } while_each_thread(tsk2, tsk);
73574+ read_unlock(&grsec_exec_file_lock);
73575+ read_unlock(&tasklist_lock);
73576+ }
73577+ rcu_read_unlock();
73578+ }
73579+
73580+ return;
73581+}
73582+
73583+int
73584+gr_check_crash_exec(const struct file *filp)
73585+{
73586+ struct acl_subject_label *curr;
73587+
73588+ if (unlikely(!gr_acl_is_enabled()))
73589+ return 0;
73590+
73591+ read_lock(&gr_inode_lock);
73592+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
73593+ __get_dev(filp->f_path.dentry),
73594+ current->role);
73595+ read_unlock(&gr_inode_lock);
73596+
73597+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
73598+ (!curr->crashes && !curr->expires))
73599+ return 0;
73600+
73601+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
73602+ time_after(curr->expires, get_seconds()))
73603+ return 1;
73604+ else if (time_before_eq(curr->expires, get_seconds())) {
73605+ curr->crashes = 0;
73606+ curr->expires = 0;
73607+ }
73608+
73609+ return 0;
73610+}
73611+
73612+void
73613+gr_handle_alertkill(struct task_struct *task)
73614+{
73615+ struct acl_subject_label *curracl;
73616+ __u32 curr_ip;
73617+ struct task_struct *p, *p2;
73618+
73619+ if (unlikely(!gr_acl_is_enabled()))
73620+ return;
73621+
73622+ curracl = task->acl;
73623+ curr_ip = task->signal->curr_ip;
73624+
73625+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
73626+ read_lock(&tasklist_lock);
73627+ do_each_thread(p2, p) {
73628+ if (p->signal->curr_ip == curr_ip)
73629+ gr_fake_force_sig(SIGKILL, p);
73630+ } while_each_thread(p2, p);
73631+ read_unlock(&tasklist_lock);
73632+ } else if (curracl->mode & GR_KILLPROC)
73633+ gr_fake_force_sig(SIGKILL, task);
73634+
73635+ return;
73636+}
73637diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
73638new file mode 100644
73639index 0000000..98011b0
73640--- /dev/null
73641+++ b/grsecurity/gracl_shm.c
73642@@ -0,0 +1,40 @@
73643+#include <linux/kernel.h>
73644+#include <linux/mm.h>
73645+#include <linux/sched.h>
73646+#include <linux/file.h>
73647+#include <linux/ipc.h>
73648+#include <linux/gracl.h>
73649+#include <linux/grsecurity.h>
73650+#include <linux/grinternal.h>
73651+
73652+int
73653+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73654+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
73655+{
73656+ struct task_struct *task;
73657+
73658+ if (!gr_acl_is_enabled())
73659+ return 1;
73660+
73661+ rcu_read_lock();
73662+ read_lock(&tasklist_lock);
73663+
73664+ task = find_task_by_vpid(shm_cprid);
73665+
73666+ if (unlikely(!task))
73667+ task = find_task_by_vpid(shm_lapid);
73668+
73669+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
73670+ (task_pid_nr(task) == shm_lapid)) &&
73671+ (task->acl->mode & GR_PROTSHM) &&
73672+ (task->acl != current->acl))) {
73673+ read_unlock(&tasklist_lock);
73674+ rcu_read_unlock();
73675+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
73676+ return 0;
73677+ }
73678+ read_unlock(&tasklist_lock);
73679+ rcu_read_unlock();
73680+
73681+ return 1;
73682+}
73683diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
73684new file mode 100644
73685index 0000000..bc0be01
73686--- /dev/null
73687+++ b/grsecurity/grsec_chdir.c
73688@@ -0,0 +1,19 @@
73689+#include <linux/kernel.h>
73690+#include <linux/sched.h>
73691+#include <linux/fs.h>
73692+#include <linux/file.h>
73693+#include <linux/grsecurity.h>
73694+#include <linux/grinternal.h>
73695+
73696+void
73697+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
73698+{
73699+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73700+ if ((grsec_enable_chdir && grsec_enable_group &&
73701+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
73702+ !grsec_enable_group)) {
73703+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
73704+ }
73705+#endif
73706+ return;
73707+}
73708diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
73709new file mode 100644
73710index 0000000..651d6c2
73711--- /dev/null
73712+++ b/grsecurity/grsec_chroot.c
73713@@ -0,0 +1,370 @@
73714+#include <linux/kernel.h>
73715+#include <linux/module.h>
73716+#include <linux/sched.h>
73717+#include <linux/file.h>
73718+#include <linux/fs.h>
73719+#include <linux/mount.h>
73720+#include <linux/types.h>
73721+#include "../fs/mount.h"
73722+#include <linux/grsecurity.h>
73723+#include <linux/grinternal.h>
73724+
73725+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
73726+int gr_init_ran;
73727+#endif
73728+
73729+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
73730+{
73731+#ifdef CONFIG_GRKERNSEC
73732+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
73733+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
73734+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
73735+ && gr_init_ran
73736+#endif
73737+ )
73738+ task->gr_is_chrooted = 1;
73739+ else {
73740+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
73741+ if (task_pid_nr(task) == 1 && !gr_init_ran)
73742+ gr_init_ran = 1;
73743+#endif
73744+ task->gr_is_chrooted = 0;
73745+ }
73746+
73747+ task->gr_chroot_dentry = path->dentry;
73748+#endif
73749+ return;
73750+}
73751+
73752+void gr_clear_chroot_entries(struct task_struct *task)
73753+{
73754+#ifdef CONFIG_GRKERNSEC
73755+ task->gr_is_chrooted = 0;
73756+ task->gr_chroot_dentry = NULL;
73757+#endif
73758+ return;
73759+}
73760+
73761+int
73762+gr_handle_chroot_unix(const pid_t pid)
73763+{
73764+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
73765+ struct task_struct *p;
73766+
73767+ if (unlikely(!grsec_enable_chroot_unix))
73768+ return 1;
73769+
73770+ if (likely(!proc_is_chrooted(current)))
73771+ return 1;
73772+
73773+ rcu_read_lock();
73774+ read_lock(&tasklist_lock);
73775+ p = find_task_by_vpid_unrestricted(pid);
73776+ if (unlikely(p && !have_same_root(current, p))) {
73777+ read_unlock(&tasklist_lock);
73778+ rcu_read_unlock();
73779+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
73780+ return 0;
73781+ }
73782+ read_unlock(&tasklist_lock);
73783+ rcu_read_unlock();
73784+#endif
73785+ return 1;
73786+}
73787+
73788+int
73789+gr_handle_chroot_nice(void)
73790+{
73791+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
73792+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
73793+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
73794+ return -EPERM;
73795+ }
73796+#endif
73797+ return 0;
73798+}
73799+
73800+int
73801+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
73802+{
73803+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
73804+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
73805+ && proc_is_chrooted(current)) {
73806+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
73807+ return -EACCES;
73808+ }
73809+#endif
73810+ return 0;
73811+}
73812+
73813+int
73814+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
73815+{
73816+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73817+ struct task_struct *p;
73818+ int ret = 0;
73819+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
73820+ return ret;
73821+
73822+ read_lock(&tasklist_lock);
73823+ do_each_pid_task(pid, type, p) {
73824+ if (!have_same_root(current, p)) {
73825+ ret = 1;
73826+ goto out;
73827+ }
73828+ } while_each_pid_task(pid, type, p);
73829+out:
73830+ read_unlock(&tasklist_lock);
73831+ return ret;
73832+#endif
73833+ return 0;
73834+}
73835+
73836+int
73837+gr_pid_is_chrooted(struct task_struct *p)
73838+{
73839+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73840+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
73841+ return 0;
73842+
73843+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
73844+ !have_same_root(current, p)) {
73845+ return 1;
73846+ }
73847+#endif
73848+ return 0;
73849+}
73850+
73851+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
73852+
73853+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
73854+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
73855+{
73856+ struct path path, currentroot;
73857+ int ret = 0;
73858+
73859+ path.dentry = (struct dentry *)u_dentry;
73860+ path.mnt = (struct vfsmount *)u_mnt;
73861+ get_fs_root(current->fs, &currentroot);
73862+ if (path_is_under(&path, &currentroot))
73863+ ret = 1;
73864+ path_put(&currentroot);
73865+
73866+ return ret;
73867+}
73868+#endif
73869+
73870+int
73871+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
73872+{
73873+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
73874+ if (!grsec_enable_chroot_fchdir)
73875+ return 1;
73876+
73877+ if (!proc_is_chrooted(current))
73878+ return 1;
73879+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
73880+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
73881+ return 0;
73882+ }
73883+#endif
73884+ return 1;
73885+}
73886+
73887+int
73888+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73889+ const time_t shm_createtime)
73890+{
73891+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
73892+ struct task_struct *p;
73893+ time_t starttime;
73894+
73895+ if (unlikely(!grsec_enable_chroot_shmat))
73896+ return 1;
73897+
73898+ if (likely(!proc_is_chrooted(current)))
73899+ return 1;
73900+
73901+ rcu_read_lock();
73902+ read_lock(&tasklist_lock);
73903+
73904+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
73905+ starttime = p->start_time.tv_sec;
73906+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
73907+ if (have_same_root(current, p)) {
73908+ goto allow;
73909+ } else {
73910+ read_unlock(&tasklist_lock);
73911+ rcu_read_unlock();
73912+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
73913+ return 0;
73914+ }
73915+ }
73916+ /* creator exited, pid reuse, fall through to next check */
73917+ }
73918+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
73919+ if (unlikely(!have_same_root(current, p))) {
73920+ read_unlock(&tasklist_lock);
73921+ rcu_read_unlock();
73922+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
73923+ return 0;
73924+ }
73925+ }
73926+
73927+allow:
73928+ read_unlock(&tasklist_lock);
73929+ rcu_read_unlock();
73930+#endif
73931+ return 1;
73932+}
73933+
73934+void
73935+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
73936+{
73937+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
73938+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
73939+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
73940+#endif
73941+ return;
73942+}
73943+
73944+int
73945+gr_handle_chroot_mknod(const struct dentry *dentry,
73946+ const struct vfsmount *mnt, const int mode)
73947+{
73948+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
73949+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
73950+ proc_is_chrooted(current)) {
73951+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
73952+ return -EPERM;
73953+ }
73954+#endif
73955+ return 0;
73956+}
73957+
73958+int
73959+gr_handle_chroot_mount(const struct dentry *dentry,
73960+ const struct vfsmount *mnt, const char *dev_name)
73961+{
73962+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
73963+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
73964+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
73965+ return -EPERM;
73966+ }
73967+#endif
73968+ return 0;
73969+}
73970+
73971+int
73972+gr_handle_chroot_pivot(void)
73973+{
73974+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
73975+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
73976+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
73977+ return -EPERM;
73978+ }
73979+#endif
73980+ return 0;
73981+}
73982+
73983+int
73984+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
73985+{
73986+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
73987+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
73988+ !gr_is_outside_chroot(dentry, mnt)) {
73989+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
73990+ return -EPERM;
73991+ }
73992+#endif
73993+ return 0;
73994+}
73995+
73996+extern const char *captab_log[];
73997+extern int captab_log_entries;
73998+
73999+int
74000+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
74001+{
74002+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74003+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
74004+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
74005+ if (cap_raised(chroot_caps, cap)) {
74006+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
74007+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
74008+ }
74009+ return 0;
74010+ }
74011+ }
74012+#endif
74013+ return 1;
74014+}
74015+
74016+int
74017+gr_chroot_is_capable(const int cap)
74018+{
74019+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74020+ return gr_task_chroot_is_capable(current, current_cred(), cap);
74021+#endif
74022+ return 1;
74023+}
74024+
74025+int
74026+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
74027+{
74028+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74029+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
74030+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
74031+ if (cap_raised(chroot_caps, cap)) {
74032+ return 0;
74033+ }
74034+ }
74035+#endif
74036+ return 1;
74037+}
74038+
74039+int
74040+gr_chroot_is_capable_nolog(const int cap)
74041+{
74042+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74043+ return gr_task_chroot_is_capable_nolog(current, cap);
74044+#endif
74045+ return 1;
74046+}
74047+
74048+int
74049+gr_handle_chroot_sysctl(const int op)
74050+{
74051+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
74052+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
74053+ proc_is_chrooted(current))
74054+ return -EACCES;
74055+#endif
74056+ return 0;
74057+}
74058+
74059+void
74060+gr_handle_chroot_chdir(const struct path *path)
74061+{
74062+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
74063+ if (grsec_enable_chroot_chdir)
74064+ set_fs_pwd(current->fs, path);
74065+#endif
74066+ return;
74067+}
74068+
74069+int
74070+gr_handle_chroot_chmod(const struct dentry *dentry,
74071+ const struct vfsmount *mnt, const int mode)
74072+{
74073+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
74074+ /* allow chmod +s on directories, but not files */
74075+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
74076+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
74077+ proc_is_chrooted(current)) {
74078+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
74079+ return -EPERM;
74080+ }
74081+#endif
74082+ return 0;
74083+}
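
/*
 * Editor's note: gr_is_outside_chroot() above leans on the kernel's
 * path_is_under() for containment.  A rough userspace analogue resolves
 * both paths and prefix-compares them -- illustration only, since
 * realpath() cannot see through the mounts and namespaces that
 * path_is_under() handles:
 */
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool path_under_root(const char *path, const char *root)
{
	char rpath[PATH_MAX], rroot[PATH_MAX];
	size_t n;

	if (!realpath(path, rpath) || !realpath(root, rroot))
		return false;
	if (strcmp(rroot, "/") == 0)          /* everything is under "/" */
		return true;
	n = strlen(rroot);
	return strncmp(rpath, rroot, n) == 0 &&
	       (rpath[n] == '/' || rpath[n] == '\0');
}
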
74084diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
74085new file mode 100644
74086index 0000000..4d6fce8
74087--- /dev/null
74088+++ b/grsecurity/grsec_disabled.c
74089@@ -0,0 +1,433 @@
74090+#include <linux/kernel.h>
74091+#include <linux/module.h>
74092+#include <linux/sched.h>
74093+#include <linux/file.h>
74094+#include <linux/fs.h>
74095+#include <linux/kdev_t.h>
74096+#include <linux/net.h>
74097+#include <linux/in.h>
74098+#include <linux/ip.h>
74099+#include <linux/skbuff.h>
74100+#include <linux/sysctl.h>
74101+
74102+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
74103+void
74104+pax_set_initial_flags(struct linux_binprm *bprm)
74105+{
74106+ return;
74107+}
74108+#endif
74109+
74110+#ifdef CONFIG_SYSCTL
74111+__u32
74112+gr_handle_sysctl(const struct ctl_table * table, const int op)
74113+{
74114+ return 0;
74115+}
74116+#endif
74117+
74118+#ifdef CONFIG_TASKSTATS
74119+int gr_is_taskstats_denied(int pid)
74120+{
74121+ return 0;
74122+}
74123+#endif
74124+
74125+int
74126+gr_acl_is_enabled(void)
74127+{
74128+ return 0;
74129+}
74130+
74131+void
74132+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
74133+{
74134+ return;
74135+}
74136+
74137+int
74138+gr_handle_rawio(const struct inode *inode)
74139+{
74140+ return 0;
74141+}
74142+
74143+void
74144+gr_acl_handle_psacct(struct task_struct *task, const long code)
74145+{
74146+ return;
74147+}
74148+
74149+int
74150+gr_handle_ptrace(struct task_struct *task, const long request)
74151+{
74152+ return 0;
74153+}
74154+
74155+int
74156+gr_handle_proc_ptrace(struct task_struct *task)
74157+{
74158+ return 0;
74159+}
74160+
74161+int
74162+gr_set_acls(const int type)
74163+{
74164+ return 0;
74165+}
74166+
74167+int
74168+gr_check_hidden_task(const struct task_struct *tsk)
74169+{
74170+ return 0;
74171+}
74172+
74173+int
74174+gr_check_protected_task(const struct task_struct *task)
74175+{
74176+ return 0;
74177+}
74178+
74179+int
74180+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
74181+{
74182+ return 0;
74183+}
74184+
74185+void
74186+gr_copy_label(struct task_struct *tsk)
74187+{
74188+ return;
74189+}
74190+
74191+void
74192+gr_set_pax_flags(struct task_struct *task)
74193+{
74194+ return;
74195+}
74196+
74197+int
74198+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
74199+ const int unsafe_share)
74200+{
74201+ return 0;
74202+}
74203+
74204+void
74205+gr_handle_delete(const ino_t ino, const dev_t dev)
74206+{
74207+ return;
74208+}
74209+
74210+void
74211+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
74212+{
74213+ return;
74214+}
74215+
74216+void
74217+gr_handle_crash(struct task_struct *task, const int sig)
74218+{
74219+ return;
74220+}
74221+
74222+int
74223+gr_check_crash_exec(const struct file *filp)
74224+{
74225+ return 0;
74226+}
74227+
74228+int
74229+gr_check_crash_uid(const kuid_t uid)
74230+{
74231+ return 0;
74232+}
74233+
74234+void
74235+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
74236+ struct dentry *old_dentry,
74237+ struct dentry *new_dentry,
74238+ struct vfsmount *mnt, const __u8 replace)
74239+{
74240+ return;
74241+}
74242+
74243+int
74244+gr_search_socket(const int family, const int type, const int protocol)
74245+{
74246+ return 1;
74247+}
74248+
74249+int
74250+gr_search_connectbind(const int mode, const struct socket *sock,
74251+ const struct sockaddr_in *addr)
74252+{
74253+ return 0;
74254+}
74255+
74256+void
74257+gr_handle_alertkill(struct task_struct *task)
74258+{
74259+ return;
74260+}
74261+
74262+__u32
74263+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
74264+{
74265+ return 1;
74266+}
74267+
74268+__u32
74269+gr_acl_handle_hidden_file(const struct dentry * dentry,
74270+ const struct vfsmount * mnt)
74271+{
74272+ return 1;
74273+}
74274+
74275+__u32
74276+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
74277+ int acc_mode)
74278+{
74279+ return 1;
74280+}
74281+
74282+__u32
74283+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
74284+{
74285+ return 1;
74286+}
74287+
74288+__u32
74289+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
74290+{
74291+ return 1;
74292+}
74293+
74294+int
74295+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
74296+ unsigned int *vm_flags)
74297+{
74298+ return 1;
74299+}
74300+
74301+__u32
74302+gr_acl_handle_truncate(const struct dentry * dentry,
74303+ const struct vfsmount * mnt)
74304+{
74305+ return 1;
74306+}
74307+
74308+__u32
74309+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
74310+{
74311+ return 1;
74312+}
74313+
74314+__u32
74315+gr_acl_handle_access(const struct dentry * dentry,
74316+ const struct vfsmount * mnt, const int fmode)
74317+{
74318+ return 1;
74319+}
74320+
74321+__u32
74322+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
74323+ umode_t *mode)
74324+{
74325+ return 1;
74326+}
74327+
74328+__u32
74329+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
74330+{
74331+ return 1;
74332+}
74333+
74334+__u32
74335+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
74336+{
74337+ return 1;
74338+}
74339+
74340+__u32
74341+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
74342+{
74343+ return 1;
74344+}
74345+
74346+void
74347+grsecurity_init(void)
74348+{
74349+ return;
74350+}
74351+
74352+umode_t gr_acl_umask(void)
74353+{
74354+ return 0;
74355+}
74356+
74357+__u32
74358+gr_acl_handle_mknod(const struct dentry * new_dentry,
74359+ const struct dentry * parent_dentry,
74360+ const struct vfsmount * parent_mnt,
74361+ const int mode)
74362+{
74363+ return 1;
74364+}
74365+
74366+__u32
74367+gr_acl_handle_mkdir(const struct dentry * new_dentry,
74368+ const struct dentry * parent_dentry,
74369+ const struct vfsmount * parent_mnt)
74370+{
74371+ return 1;
74372+}
74373+
74374+__u32
74375+gr_acl_handle_symlink(const struct dentry * new_dentry,
74376+ const struct dentry * parent_dentry,
74377+ const struct vfsmount * parent_mnt, const struct filename *from)
74378+{
74379+ return 1;
74380+}
74381+
74382+__u32
74383+gr_acl_handle_link(const struct dentry * new_dentry,
74384+ const struct dentry * parent_dentry,
74385+ const struct vfsmount * parent_mnt,
74386+ const struct dentry * old_dentry,
74387+ const struct vfsmount * old_mnt, const struct filename *to)
74388+{
74389+ return 1;
74390+}
74391+
74392+int
74393+gr_acl_handle_rename(const struct dentry *new_dentry,
74394+ const struct dentry *parent_dentry,
74395+ const struct vfsmount *parent_mnt,
74396+ const struct dentry *old_dentry,
74397+ const struct inode *old_parent_inode,
74398+ const struct vfsmount *old_mnt, const struct filename *newname)
74399+{
74400+ return 0;
74401+}
74402+
74403+int
74404+gr_acl_handle_filldir(const struct file *file, const char *name,
74405+ const int namelen, const ino_t ino)
74406+{
74407+ return 1;
74408+}
74409+
74410+int
74411+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74412+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
74413+{
74414+ return 1;
74415+}
74416+
74417+int
74418+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
74419+{
74420+ return 0;
74421+}
74422+
74423+int
74424+gr_search_accept(const struct socket *sock)
74425+{
74426+ return 0;
74427+}
74428+
74429+int
74430+gr_search_listen(const struct socket *sock)
74431+{
74432+ return 0;
74433+}
74434+
74435+int
74436+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
74437+{
74438+ return 0;
74439+}
74440+
74441+__u32
74442+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
74443+{
74444+ return 1;
74445+}
74446+
74447+__u32
74448+gr_acl_handle_creat(const struct dentry * dentry,
74449+ const struct dentry * p_dentry,
74450+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
74451+ const int imode)
74452+{
74453+ return 1;
74454+}
74455+
74456+void
74457+gr_acl_handle_exit(void)
74458+{
74459+ return;
74460+}
74461+
74462+int
74463+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
74464+{
74465+ return 1;
74466+}
74467+
74468+void
74469+gr_set_role_label(const kuid_t uid, const kgid_t gid)
74470+{
74471+ return;
74472+}
74473+
74474+int
74475+gr_acl_handle_procpidmem(const struct task_struct *task)
74476+{
74477+ return 0;
74478+}
74479+
74480+int
74481+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
74482+{
74483+ return 0;
74484+}
74485+
74486+int
74487+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
74488+{
74489+ return 0;
74490+}
74491+
74492+int
74493+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
74494+{
74495+ return 0;
74496+}
74497+
74498+int
74499+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
74500+{
74501+ return 0;
74502+}
74503+
74504+int gr_acl_enable_at_secure(void)
74505+{
74506+ return 0;
74507+}
74508+
74509+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
74510+{
74511+ return dentry->d_sb->s_dev;
74512+}
74513+
74514+void gr_put_exec_file(struct task_struct *task)
74515+{
74516+ return;
74517+}
74518+
74519+#ifdef CONFIG_SECURITY
74520+EXPORT_SYMBOL_GPL(gr_check_user_change);
74521+EXPORT_SYMBOL_GPL(gr_check_group_change);
74522+#endif
74523diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
74524new file mode 100644
74525index 0000000..f35f454
74526--- /dev/null
74527+++ b/grsecurity/grsec_exec.c
74528@@ -0,0 +1,187 @@
74529+#include <linux/kernel.h>
74530+#include <linux/sched.h>
74531+#include <linux/file.h>
74532+#include <linux/binfmts.h>
74533+#include <linux/fs.h>
74534+#include <linux/types.h>
74535+#include <linux/grdefs.h>
74536+#include <linux/grsecurity.h>
74537+#include <linux/grinternal.h>
74538+#include <linux/capability.h>
74539+#include <linux/module.h>
74540+#include <linux/compat.h>
74541+
74542+#include <asm/uaccess.h>
74543+
74544+#ifdef CONFIG_GRKERNSEC_EXECLOG
74545+static char gr_exec_arg_buf[132];
74546+static DEFINE_MUTEX(gr_exec_arg_mutex);
74547+#endif
74548+
74549+struct user_arg_ptr {
74550+#ifdef CONFIG_COMPAT
74551+ bool is_compat;
74552+#endif
74553+ union {
74554+ const char __user *const __user *native;
74555+#ifdef CONFIG_COMPAT
74556+ const compat_uptr_t __user *compat;
74557+#endif
74558+ } ptr;
74559+};
74560+
74561+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
74562+
74563+void
74564+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
74565+{
74566+#ifdef CONFIG_GRKERNSEC_EXECLOG
74567+ char *grarg = gr_exec_arg_buf;
74568+ unsigned int i, x, execlen = 0;
74569+ char c;
74570+
74571+ if (!((grsec_enable_execlog && grsec_enable_group &&
74572+ in_group_p(grsec_audit_gid))
74573+ || (grsec_enable_execlog && !grsec_enable_group)))
74574+ return;
74575+
74576+ mutex_lock(&gr_exec_arg_mutex);
74577+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
74578+
74579+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
74580+ const char __user *p;
74581+ unsigned int len;
74582+
74583+ p = get_user_arg_ptr(argv, i);
74584+ if (IS_ERR(p))
74585+ goto log;
74586+
74587+ len = strnlen_user(p, 128 - execlen);
74588+ if (len > 128 - execlen)
74589+ len = 128 - execlen;
74590+ else if (len > 0)
74591+ len--;
74592+ if (copy_from_user(grarg + execlen, p, len))
74593+ goto log;
74594+
74595+ /* rewrite unprintable characters */
74596+ for (x = 0; x < len; x++) {
74597+ c = *(grarg + execlen + x);
74598+ if (c < 32 || c > 126)
74599+ *(grarg + execlen + x) = ' ';
74600+ }
74601+
74602+ execlen += len;
74603+ *(grarg + execlen) = ' ';
74604+ *(grarg + execlen + 1) = '\0';
74605+ execlen++;
74606+ }
74607+
74608+ log:
74609+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
74610+ bprm->file->f_path.mnt, grarg);
74611+ mutex_unlock(&gr_exec_arg_mutex);
74612+#endif
74613+ return;
74614+}
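
/*
 * Editor's note: gr_handle_exec_args() flattens argv into a fixed 128-byte
 * window and rewrites unprintable bytes to spaces so each audit line stays
 * a single clean record.  The sanitization step in isolation:
 */
#include <stddef.h>

static void sanitize_for_log(char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] < 32 || buf[i] > 126)  /* outside printable ASCII */
			buf[i] = ' ';
}
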
74615+
74616+#ifdef CONFIG_GRKERNSEC
74617+extern int gr_acl_is_capable(const int cap);
74618+extern int gr_acl_is_capable_nolog(const int cap);
74619+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
74620+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
74621+extern int gr_chroot_is_capable(const int cap);
74622+extern int gr_chroot_is_capable_nolog(const int cap);
74623+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
74624+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
74625+#endif
74626+
74627+const char *captab_log[] = {
74628+ "CAP_CHOWN",
74629+ "CAP_DAC_OVERRIDE",
74630+ "CAP_DAC_READ_SEARCH",
74631+ "CAP_FOWNER",
74632+ "CAP_FSETID",
74633+ "CAP_KILL",
74634+ "CAP_SETGID",
74635+ "CAP_SETUID",
74636+ "CAP_SETPCAP",
74637+ "CAP_LINUX_IMMUTABLE",
74638+ "CAP_NET_BIND_SERVICE",
74639+ "CAP_NET_BROADCAST",
74640+ "CAP_NET_ADMIN",
74641+ "CAP_NET_RAW",
74642+ "CAP_IPC_LOCK",
74643+ "CAP_IPC_OWNER",
74644+ "CAP_SYS_MODULE",
74645+ "CAP_SYS_RAWIO",
74646+ "CAP_SYS_CHROOT",
74647+ "CAP_SYS_PTRACE",
74648+ "CAP_SYS_PACCT",
74649+ "CAP_SYS_ADMIN",
74650+ "CAP_SYS_BOOT",
74651+ "CAP_SYS_NICE",
74652+ "CAP_SYS_RESOURCE",
74653+ "CAP_SYS_TIME",
74654+ "CAP_SYS_TTY_CONFIG",
74655+ "CAP_MKNOD",
74656+ "CAP_LEASE",
74657+ "CAP_AUDIT_WRITE",
74658+ "CAP_AUDIT_CONTROL",
74659+ "CAP_SETFCAP",
74660+ "CAP_MAC_OVERRIDE",
74661+ "CAP_MAC_ADMIN",
74662+ "CAP_SYSLOG",
74663+ "CAP_WAKE_ALARM"
74664+};
74665+
74666+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
74667+
74668+int gr_is_capable(const int cap)
74669+{
74670+#ifdef CONFIG_GRKERNSEC
74671+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
74672+ return 1;
74673+ return 0;
74674+#else
74675+ return 1;
74676+#endif
74677+}
74678+
74679+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
74680+{
74681+#ifdef CONFIG_GRKERNSEC
74682+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
74683+ return 1;
74684+ return 0;
74685+#else
74686+ return 1;
74687+#endif
74688+}
74689+
74690+int gr_is_capable_nolog(const int cap)
74691+{
74692+#ifdef CONFIG_GRKERNSEC
74693+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
74694+ return 1;
74695+ return 0;
74696+#else
74697+ return 1;
74698+#endif
74699+}
74700+
74701+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
74702+{
74703+#ifdef CONFIG_GRKERNSEC
74704+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
74705+ return 1;
74706+ return 0;
74707+#else
74708+ return 1;
74709+#endif
74710+}
74711+
74712+EXPORT_SYMBOL_GPL(gr_is_capable);
74713+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
74714+EXPORT_SYMBOL_GPL(gr_task_is_capable);
74715+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
74716diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
74717new file mode 100644
74718index 0000000..06cc6ea
74719--- /dev/null
74720+++ b/grsecurity/grsec_fifo.c
74721@@ -0,0 +1,24 @@
74722+#include <linux/kernel.h>
74723+#include <linux/sched.h>
74724+#include <linux/fs.h>
74725+#include <linux/file.h>
74726+#include <linux/grinternal.h>
74727+
74728+int
74729+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
74730+ const struct dentry *dir, const int flag, const int acc_mode)
74731+{
74732+#ifdef CONFIG_GRKERNSEC_FIFO
74733+ const struct cred *cred = current_cred();
74734+
74735+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
74736+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
74737+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
74738+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
74739+ if (!inode_permission(dentry->d_inode, acc_mode))
74740+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
74741+ return -EACCES;
74742+ }
74743+#endif
74744+ return 0;
74745+}
74746diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
74747new file mode 100644
74748index 0000000..8ca18bf
74749--- /dev/null
74750+++ b/grsecurity/grsec_fork.c
74751@@ -0,0 +1,23 @@
74752+#include <linux/kernel.h>
74753+#include <linux/sched.h>
74754+#include <linux/grsecurity.h>
74755+#include <linux/grinternal.h>
74756+#include <linux/errno.h>
74757+
74758+void
74759+gr_log_forkfail(const int retval)
74760+{
74761+#ifdef CONFIG_GRKERNSEC_FORKFAIL
74762+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
74763+ switch (retval) {
74764+ case -EAGAIN:
74765+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
74766+ break;
74767+ case -ENOMEM:
74768+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
74769+ break;
74770+ }
74771+ }
74772+#endif
74773+ return;
74774+}
74775diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
74776new file mode 100644
74777index 0000000..ae6c028
74778--- /dev/null
74779+++ b/grsecurity/grsec_init.c
74780@@ -0,0 +1,272 @@
74781+#include <linux/kernel.h>
74782+#include <linux/sched.h>
74783+#include <linux/mm.h>
74784+#include <linux/gracl.h>
74785+#include <linux/slab.h>
74786+#include <linux/vmalloc.h>
74787+#include <linux/percpu.h>
74788+#include <linux/module.h>
74789+
74790+int grsec_enable_ptrace_readexec;
74791+int grsec_enable_setxid;
74792+int grsec_enable_symlinkown;
74793+kgid_t grsec_symlinkown_gid;
74794+int grsec_enable_brute;
74795+int grsec_enable_link;
74796+int grsec_enable_dmesg;
74797+int grsec_enable_harden_ptrace;
74798+int grsec_enable_harden_ipc;
74799+int grsec_enable_fifo;
74800+int grsec_enable_execlog;
74801+int grsec_enable_signal;
74802+int grsec_enable_forkfail;
74803+int grsec_enable_audit_ptrace;
74804+int grsec_enable_time;
74805+int grsec_enable_group;
74806+kgid_t grsec_audit_gid;
74807+int grsec_enable_chdir;
74808+int grsec_enable_mount;
74809+int grsec_enable_rofs;
74810+int grsec_deny_new_usb;
74811+int grsec_enable_chroot_findtask;
74812+int grsec_enable_chroot_mount;
74813+int grsec_enable_chroot_shmat;
74814+int grsec_enable_chroot_fchdir;
74815+int grsec_enable_chroot_double;
74816+int grsec_enable_chroot_pivot;
74817+int grsec_enable_chroot_chdir;
74818+int grsec_enable_chroot_chmod;
74819+int grsec_enable_chroot_mknod;
74820+int grsec_enable_chroot_nice;
74821+int grsec_enable_chroot_execlog;
74822+int grsec_enable_chroot_caps;
74823+int grsec_enable_chroot_sysctl;
74824+int grsec_enable_chroot_unix;
74825+int grsec_enable_tpe;
74826+kgid_t grsec_tpe_gid;
74827+int grsec_enable_blackhole;
74828+#ifdef CONFIG_IPV6_MODULE
74829+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
74830+#endif
74831+int grsec_lastack_retries;
74832+int grsec_enable_tpe_all;
74833+int grsec_enable_tpe_invert;
74834+int grsec_enable_socket_all;
74835+kgid_t grsec_socket_all_gid;
74836+int grsec_enable_socket_client;
74837+kgid_t grsec_socket_client_gid;
74838+int grsec_enable_socket_server;
74839+kgid_t grsec_socket_server_gid;
74840+int grsec_resource_logging;
74841+int grsec_disable_privio;
74842+int grsec_enable_log_rwxmaps;
74843+int grsec_lock;
74844+
74845+DEFINE_SPINLOCK(grsec_alert_lock);
74846+unsigned long grsec_alert_wtime = 0;
74847+unsigned long grsec_alert_fyet = 0;
74848+
74849+DEFINE_SPINLOCK(grsec_audit_lock);
74850+
74851+DEFINE_RWLOCK(grsec_exec_file_lock);
74852+
74853+char *gr_shared_page[4];
74854+
74855+char *gr_alert_log_fmt;
74856+char *gr_audit_log_fmt;
74857+char *gr_alert_log_buf;
74858+char *gr_audit_log_buf;
74859+
74860+void __init
74861+grsecurity_init(void)
74862+{
74863+ int j;
74864+ /* create the per-cpu shared pages */
74865+
74866+#ifdef CONFIG_X86
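	/* Assumption: the memset below targets the x86 BIOS Data Area
	   keyboard buffer (head/tail pointers at 0x41a/0x41c, ring buffer
	   through 0x43d), so keystrokes typed before boot, such as a BIOS
	   password, do not linger in identity-mapped low memory. */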
74867+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
74868+#endif
74869+
74870+ for (j = 0; j < 4; j++) {
74871+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
74872+ if (gr_shared_page[j] == NULL) {
74873+ panic("Unable to allocate grsecurity shared page");
74874+ return;
74875+ }
74876+ }
74877+
74878+ /* allocate log buffers */
74879+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
74880+ if (!gr_alert_log_fmt) {
74881+ panic("Unable to allocate grsecurity alert log format buffer");
74882+ return;
74883+ }
74884+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
74885+ if (!gr_audit_log_fmt) {
74886+ panic("Unable to allocate grsecurity audit log format buffer");
74887+ return;
74888+ }
74889+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
74890+ if (!gr_alert_log_buf) {
74891+ panic("Unable to allocate grsecurity alert log buffer");
74892+ return;
74893+ }
74894+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
74895+ if (!gr_audit_log_buf) {
74896+ panic("Unable to allocate grsecurity audit log buffer");
74897+ return;
74898+ }
74899+
74900+#ifdef CONFIG_GRKERNSEC_IO
74901+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
74902+ grsec_disable_privio = 1;
74903+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
74904+ grsec_disable_privio = 1;
74905+#else
74906+ grsec_disable_privio = 0;
74907+#endif
74908+#endif
74909+
74910+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
74911+ /* for backward compatibility, tpe_invert always defaults to on if
74912+ enabled in the kernel
74913+ */
74914+ grsec_enable_tpe_invert = 1;
74915+#endif
74916+
74917+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
74918+#ifndef CONFIG_GRKERNSEC_SYSCTL
74919+ grsec_lock = 1;
74920+#endif
74921+
74922+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74923+ grsec_enable_log_rwxmaps = 1;
74924+#endif
74925+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
74926+ grsec_enable_group = 1;
74927+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
74928+#endif
74929+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74930+ grsec_enable_ptrace_readexec = 1;
74931+#endif
74932+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
74933+ grsec_enable_chdir = 1;
74934+#endif
74935+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
74936+ grsec_enable_harden_ptrace = 1;
74937+#endif
74938+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74939+ grsec_enable_harden_ipc = 1;
74940+#endif
74941+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74942+ grsec_enable_mount = 1;
74943+#endif
74944+#ifdef CONFIG_GRKERNSEC_LINK
74945+ grsec_enable_link = 1;
74946+#endif
74947+#ifdef CONFIG_GRKERNSEC_BRUTE
74948+ grsec_enable_brute = 1;
74949+#endif
74950+#ifdef CONFIG_GRKERNSEC_DMESG
74951+ grsec_enable_dmesg = 1;
74952+#endif
74953+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74954+ grsec_enable_blackhole = 1;
74955+ grsec_lastack_retries = 4;
74956+#endif
74957+#ifdef CONFIG_GRKERNSEC_FIFO
74958+ grsec_enable_fifo = 1;
74959+#endif
74960+#ifdef CONFIG_GRKERNSEC_EXECLOG
74961+ grsec_enable_execlog = 1;
74962+#endif
74963+#ifdef CONFIG_GRKERNSEC_SETXID
74964+ grsec_enable_setxid = 1;
74965+#endif
74966+#ifdef CONFIG_GRKERNSEC_SIGNAL
74967+ grsec_enable_signal = 1;
74968+#endif
74969+#ifdef CONFIG_GRKERNSEC_FORKFAIL
74970+ grsec_enable_forkfail = 1;
74971+#endif
74972+#ifdef CONFIG_GRKERNSEC_TIME
74973+ grsec_enable_time = 1;
74974+#endif
74975+#ifdef CONFIG_GRKERNSEC_RESLOG
74976+ grsec_resource_logging = 1;
74977+#endif
74978+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74979+ grsec_enable_chroot_findtask = 1;
74980+#endif
74981+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
74982+ grsec_enable_chroot_unix = 1;
74983+#endif
74984+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
74985+ grsec_enable_chroot_mount = 1;
74986+#endif
74987+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
74988+ grsec_enable_chroot_fchdir = 1;
74989+#endif
74990+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
74991+ grsec_enable_chroot_shmat = 1;
74992+#endif
74993+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74994+ grsec_enable_audit_ptrace = 1;
74995+#endif
74996+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
74997+ grsec_enable_chroot_double = 1;
74998+#endif
74999+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
75000+ grsec_enable_chroot_pivot = 1;
75001+#endif
75002+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
75003+ grsec_enable_chroot_chdir = 1;
75004+#endif
75005+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
75006+ grsec_enable_chroot_chmod = 1;
75007+#endif
75008+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
75009+ grsec_enable_chroot_mknod = 1;
75010+#endif
75011+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75012+ grsec_enable_chroot_nice = 1;
75013+#endif
75014+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
75015+ grsec_enable_chroot_execlog = 1;
75016+#endif
75017+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75018+ grsec_enable_chroot_caps = 1;
75019+#endif
75020+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
75021+ grsec_enable_chroot_sysctl = 1;
75022+#endif
75023+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
75024+ grsec_enable_symlinkown = 1;
75025+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
75026+#endif
75027+#ifdef CONFIG_GRKERNSEC_TPE
75028+ grsec_enable_tpe = 1;
75029+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
75030+#ifdef CONFIG_GRKERNSEC_TPE_ALL
75031+ grsec_enable_tpe_all = 1;
75032+#endif
75033+#endif
75034+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
75035+ grsec_enable_socket_all = 1;
75036+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
75037+#endif
75038+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
75039+ grsec_enable_socket_client = 1;
75040+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
75041+#endif
75042+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75043+ grsec_enable_socket_server = 1;
75044+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
75045+#endif
75046+#endif
75047+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
75048+ grsec_deny_new_usb = 1;
75049+#endif
75050+
75051+ return;
75052+}
75053diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
75054new file mode 100644
75055index 0000000..1773300
75056--- /dev/null
75057+++ b/grsecurity/grsec_ipc.c
75058@@ -0,0 +1,48 @@
75059+#include <linux/kernel.h>
75060+#include <linux/mm.h>
75061+#include <linux/sched.h>
75062+#include <linux/file.h>
75063+#include <linux/ipc.h>
75064+#include <linux/ipc_namespace.h>
75065+#include <linux/grsecurity.h>
75066+#include <linux/grinternal.h>
75067+
75068+int
75069+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
75070+{
75071+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
75072+ int write;
75073+ int orig_granted_mode;
75074+ kuid_t euid;
75075+ kgid_t egid;
75076+
75077+ if (!grsec_enable_harden_ipc)
75078+ return 1;
75079+
75080+ euid = current_euid();
75081+ egid = current_egid();
75082+
75083+ write = requested_mode & 00002;
75084+ orig_granted_mode = ipcp->mode;
75085+
75086+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
75087+ orig_granted_mode >>= 6;
75088+ else {
75089+ /* if likely wrong permissions, lock to user */
75090+ if (orig_granted_mode & 0007)
75091+ orig_granted_mode = 0;
75092+ /* otherwise do a egid-only check */
75093+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
75094+ orig_granted_mode >>= 3;
75095+ /* otherwise, no access */
75096+ else
75097+ orig_granted_mode = 0;
75098+ }
75099+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
75100+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
75101+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
75102+ return 0;
75103+ }
75104+#endif
75105+ return 1;
75106+}
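gr_ipc_permitted() recomputes the mode the object's own permission bits would grant (owner bits for a matching euid, group bits for a matching egid, and never the world bits, which it treats as a misconfiguration) and denies requests that only succeed via the looser granted_mode computed by the core IPC code. The recomputation as a self-contained sketch; the struct and sample values are illustrative, not kernel types:

#include <stdio.h>

struct ipc_perm_like { unsigned uid, gid, cuid, cgid, mode; };

/* Recompute the permission bits the object itself would grant,
 * following the same owner/group/other selection as the hook:
 * world bits are never honored, matching the hardening intent. */
static unsigned hardened_granted(const struct ipc_perm_like *p,
				 unsigned euid, unsigned egid)
{
	unsigned mode = p->mode;

	if (euid == p->cuid || euid == p->uid)
		return (mode >> 6) & 07;
	if (mode & 0007)	/* any world access: treat as misconfigured */
		return 0;
	if (egid == p->cgid || egid == p->gid)
		return (mode >> 3) & 07;
	return 0;
}

int main(void)
{
	struct ipc_perm_like shm = { .uid = 1000, .gid = 100,
				     .cuid = 1000, .cgid = 100, .mode = 0660 };

	printf("uid 1001 in gid 100 gets %o\n",
	       hardened_granted(&shm, 1001, 100));
	return 0;
}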
75107diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
75108new file mode 100644
75109index 0000000..5e05e20
75110--- /dev/null
75111+++ b/grsecurity/grsec_link.c
75112@@ -0,0 +1,58 @@
75113+#include <linux/kernel.h>
75114+#include <linux/sched.h>
75115+#include <linux/fs.h>
75116+#include <linux/file.h>
75117+#include <linux/grinternal.h>
75118+
75119+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
75120+{
75121+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
75122+ const struct inode *link_inode = link->dentry->d_inode;
75123+
75124+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
75125+ /* ignore root-owned links, e.g. /proc/self */
75126+ gr_is_global_nonroot(link_inode->i_uid) && target &&
75127+ !uid_eq(link_inode->i_uid, target->i_uid)) {
75128+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
75129+ return 1;
75130+ }
75131+#endif
75132+ return 0;
75133+}
75134+
75135+int
75136+gr_handle_follow_link(const struct inode *parent,
75137+ const struct inode *inode,
75138+ const struct dentry *dentry, const struct vfsmount *mnt)
75139+{
75140+#ifdef CONFIG_GRKERNSEC_LINK
75141+ const struct cred *cred = current_cred();
75142+
75143+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
75144+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
75145+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
75146+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
75147+ return -EACCES;
75148+ }
75149+#endif
75150+ return 0;
75151+}
75152+
75153+int
75154+gr_handle_hardlink(const struct dentry *dentry,
75155+ const struct vfsmount *mnt,
75156+ struct inode *inode, const int mode, const struct filename *to)
75157+{
75158+#ifdef CONFIG_GRKERNSEC_LINK
75159+ const struct cred *cred = current_cred();
75160+
75161+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
75162+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
75163+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
75164+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
75165+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
75166+ return -EPERM;
75167+ }
75168+#endif
75169+ return 0;
75170+}
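These hooks implement the idea later adopted in mainline as fs.protected_symlinks and fs.protected_hardlinks: refuse to follow a symlink sitting in a world-writable sticky directory unless the link owner matches the directory owner or the follower, and refuse hardlinks to files the caller does not own and could not open for read/write. A userspace sketch of the symlink-follow test; the paths and the use of geteuid() in place of the kernel's fsuid are illustrative assumptions:

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* gr_handle_follow_link()'s test: deny following a symlink found in a
 * sticky, world-writable directory when the link is owned by neither
 * the directory owner nor the caller. */
static int follow_would_be_denied(const char *link, const char *dir)
{
	struct stat lst, dst;

	if (lstat(link, &lst) || stat(dir, &dst))
		return 0;
	return S_ISLNK(lst.st_mode) &&
	       (dst.st_mode & S_ISVTX) && (dst.st_mode & S_IWOTH) &&
	       lst.st_uid != dst.st_uid &&
	       lst.st_uid != geteuid();	/* fsuid stand-in */
}

int main(void)
{
	printf("deny=%d\n", follow_would_be_denied("/tmp/evil-link", "/tmp"));
	return 0;
}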
75171diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
75172new file mode 100644
75173index 0000000..dbe0a6b
75174--- /dev/null
75175+++ b/grsecurity/grsec_log.c
75176@@ -0,0 +1,341 @@
75177+#include <linux/kernel.h>
75178+#include <linux/sched.h>
75179+#include <linux/file.h>
75180+#include <linux/tty.h>
75181+#include <linux/fs.h>
75182+#include <linux/mm.h>
75183+#include <linux/grinternal.h>
75184+
75185+#ifdef CONFIG_TREE_PREEMPT_RCU
75186+#define DISABLE_PREEMPT() preempt_disable()
75187+#define ENABLE_PREEMPT() preempt_enable()
75188+#else
75189+#define DISABLE_PREEMPT()
75190+#define ENABLE_PREEMPT()
75191+#endif
75192+
75193+#define BEGIN_LOCKS(x) \
75194+ DISABLE_PREEMPT(); \
75195+ rcu_read_lock(); \
75196+ read_lock(&tasklist_lock); \
75197+ read_lock(&grsec_exec_file_lock); \
75198+ if (x != GR_DO_AUDIT) \
75199+ spin_lock(&grsec_alert_lock); \
75200+ else \
75201+ spin_lock(&grsec_audit_lock)
75202+
75203+#define END_LOCKS(x) \
75204+ if (x != GR_DO_AUDIT) \
75205+ spin_unlock(&grsec_alert_lock); \
75206+ else \
75207+ spin_unlock(&grsec_audit_lock); \
75208+ read_unlock(&grsec_exec_file_lock); \
75209+ read_unlock(&tasklist_lock); \
75210+ rcu_read_unlock(); \
75211+ ENABLE_PREEMPT(); \
75212+ if (x == GR_DONT_AUDIT) \
75213+ gr_handle_alertkill(current)
75214+
75215+enum {
75216+ FLOODING,
75217+ NO_FLOODING
75218+};
75219+
75220+extern char *gr_alert_log_fmt;
75221+extern char *gr_audit_log_fmt;
75222+extern char *gr_alert_log_buf;
75223+extern char *gr_audit_log_buf;
75224+
75225+static int gr_log_start(int audit)
75226+{
75227+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
75228+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
75229+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
75230+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
75231+ unsigned long curr_secs = get_seconds();
75232+
75233+ if (audit == GR_DO_AUDIT)
75234+ goto set_fmt;
75235+
75236+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
75237+ grsec_alert_wtime = curr_secs;
75238+ grsec_alert_fyet = 0;
75239+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
75240+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
75241+ grsec_alert_fyet++;
75242+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
75243+ grsec_alert_wtime = curr_secs;
75244+ grsec_alert_fyet++;
75245+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
75246+ return FLOODING;
75247+ }
75248+ else return FLOODING;
75249+
75250+set_fmt:
75251+#endif
75252+ memset(buf, 0, PAGE_SIZE);
75253+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
75254+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
75255+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
75256+ } else if (current->signal->curr_ip) {
75257+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
75258+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
75259+ } else if (gr_acl_is_enabled()) {
75260+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
75261+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
75262+ } else {
75263+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
75264+ strcpy(buf, fmt);
75265+ }
75266+
75267+ return NO_FLOODING;
75268+}
75269+
75270+static void gr_log_middle(int audit, const char *msg, va_list ap)
75271+ __attribute__ ((format (printf, 2, 0)));
75272+
75273+static void gr_log_middle(int audit, const char *msg, va_list ap)
75274+{
75275+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
75276+ unsigned int len = strlen(buf);
75277+
75278+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
75279+
75280+ return;
75281+}
75282+
75283+static void gr_log_middle_varargs(int audit, const char *msg, ...)
75284+ __attribute__ ((format (printf, 2, 3)));
75285+
75286+static void gr_log_middle_varargs(int audit, const char *msg, ...)
75287+{
75288+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
75289+ unsigned int len = strlen(buf);
75290+ va_list ap;
75291+
75292+ va_start(ap, msg);
75293+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
75294+ va_end(ap);
75295+
75296+ return;
75297+}
75298+
75299+static void gr_log_end(int audit, int append_default)
75300+{
75301+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
75302+ if (append_default) {
75303+ struct task_struct *task = current;
75304+ struct task_struct *parent = task->real_parent;
75305+ const struct cred *cred = __task_cred(task);
75306+ const struct cred *pcred = __task_cred(parent);
75307+ unsigned int len = strlen(buf);
75308+
75309+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
75310+ }
75311+
75312+ printk("%s\n", buf);
75313+
75314+ return;
75315+}
75316+
75317+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
75318+{
75319+ int logtype;
75320+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
75321+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
75322+ void *voidptr = NULL;
75323+ int num1 = 0, num2 = 0;
75324+ unsigned long ulong1 = 0, ulong2 = 0;
75325+ struct dentry *dentry = NULL;
75326+ struct vfsmount *mnt = NULL;
75327+ struct file *file = NULL;
75328+ struct task_struct *task = NULL;
75329+ struct vm_area_struct *vma = NULL;
75330+ const struct cred *cred, *pcred;
75331+ va_list ap;
75332+
75333+ BEGIN_LOCKS(audit);
75334+ logtype = gr_log_start(audit);
75335+ if (logtype == FLOODING) {
75336+ END_LOCKS(audit);
75337+ return;
75338+ }
75339+ va_start(ap, argtypes);
75340+ switch (argtypes) {
75341+ case GR_TTYSNIFF:
75342+ task = va_arg(ap, struct task_struct *);
75343+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
75344+ break;
75345+ case GR_SYSCTL_HIDDEN:
75346+ str1 = va_arg(ap, char *);
75347+ gr_log_middle_varargs(audit, msg, result, str1);
75348+ break;
75349+ case GR_RBAC:
75350+ dentry = va_arg(ap, struct dentry *);
75351+ mnt = va_arg(ap, struct vfsmount *);
75352+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
75353+ break;
75354+ case GR_RBAC_STR:
75355+ dentry = va_arg(ap, struct dentry *);
75356+ mnt = va_arg(ap, struct vfsmount *);
75357+ str1 = va_arg(ap, char *);
75358+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
75359+ break;
75360+ case GR_STR_RBAC:
75361+ str1 = va_arg(ap, char *);
75362+ dentry = va_arg(ap, struct dentry *);
75363+ mnt = va_arg(ap, struct vfsmount *);
75364+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
75365+ break;
75366+ case GR_RBAC_MODE2:
75367+ dentry = va_arg(ap, struct dentry *);
75368+ mnt = va_arg(ap, struct vfsmount *);
75369+ str1 = va_arg(ap, char *);
75370+ str2 = va_arg(ap, char *);
75371+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
75372+ break;
75373+ case GR_RBAC_MODE3:
75374+ dentry = va_arg(ap, struct dentry *);
75375+ mnt = va_arg(ap, struct vfsmount *);
75376+ str1 = va_arg(ap, char *);
75377+ str2 = va_arg(ap, char *);
75378+ str3 = va_arg(ap, char *);
75379+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
75380+ break;
75381+ case GR_FILENAME:
75382+ dentry = va_arg(ap, struct dentry *);
75383+ mnt = va_arg(ap, struct vfsmount *);
75384+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
75385+ break;
75386+ case GR_STR_FILENAME:
75387+ str1 = va_arg(ap, char *);
75388+ dentry = va_arg(ap, struct dentry *);
75389+ mnt = va_arg(ap, struct vfsmount *);
75390+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
75391+ break;
75392+ case GR_FILENAME_STR:
75393+ dentry = va_arg(ap, struct dentry *);
75394+ mnt = va_arg(ap, struct vfsmount *);
75395+ str1 = va_arg(ap, char *);
75396+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
75397+ break;
75398+ case GR_FILENAME_TWO_INT:
75399+ dentry = va_arg(ap, struct dentry *);
75400+ mnt = va_arg(ap, struct vfsmount *);
75401+ num1 = va_arg(ap, int);
75402+ num2 = va_arg(ap, int);
75403+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
75404+ break;
75405+ case GR_FILENAME_TWO_INT_STR:
75406+ dentry = va_arg(ap, struct dentry *);
75407+ mnt = va_arg(ap, struct vfsmount *);
75408+ num1 = va_arg(ap, int);
75409+ num2 = va_arg(ap, int);
75410+ str1 = va_arg(ap, char *);
75411+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
75412+ break;
75413+ case GR_TEXTREL:
75414+ file = va_arg(ap, struct file *);
75415+ ulong1 = va_arg(ap, unsigned long);
75416+ ulong2 = va_arg(ap, unsigned long);
75417+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
75418+ break;
75419+ case GR_PTRACE:
75420+ task = va_arg(ap, struct task_struct *);
75421+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
75422+ break;
75423+ case GR_RESOURCE:
75424+ task = va_arg(ap, struct task_struct *);
75425+ cred = __task_cred(task);
75426+ pcred = __task_cred(task->real_parent);
75427+ ulong1 = va_arg(ap, unsigned long);
75428+ str1 = va_arg(ap, char *);
75429+ ulong2 = va_arg(ap, unsigned long);
75430+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
75431+ break;
75432+ case GR_CAP:
75433+ task = va_arg(ap, struct task_struct *);
75434+ cred = __task_cred(task);
75435+ pcred = __task_cred(task->real_parent);
75436+ str1 = va_arg(ap, char *);
75437+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
75438+ break;
75439+ case GR_SIG:
75440+ str1 = va_arg(ap, char *);
75441+ voidptr = va_arg(ap, void *);
75442+ gr_log_middle_varargs(audit, msg, str1, voidptr);
75443+ break;
75444+ case GR_SIG2:
75445+ task = va_arg(ap, struct task_struct *);
75446+ cred = __task_cred(task);
75447+ pcred = __task_cred(task->real_parent);
75448+ num1 = va_arg(ap, int);
75449+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
75450+ break;
75451+ case GR_CRASH1:
75452+ task = va_arg(ap, struct task_struct *);
75453+ cred = __task_cred(task);
75454+ pcred = __task_cred(task->real_parent);
75455+ ulong1 = va_arg(ap, unsigned long);
75456+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
75457+ break;
75458+ case GR_CRASH2:
75459+ task = va_arg(ap, struct task_struct *);
75460+ cred = __task_cred(task);
75461+ pcred = __task_cred(task->real_parent);
75462+ ulong1 = va_arg(ap, unsigned long);
75463+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
75464+ break;
75465+ case GR_RWXMAP:
75466+ file = va_arg(ap, struct file *);
75467+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
75468+ break;
75469+ case GR_RWXMAPVMA:
75470+ vma = va_arg(ap, struct vm_area_struct *);
75471+ if (vma->vm_file)
75472+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
75473+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
75474+ str1 = "<stack>";
75475+ else if (vma->vm_start <= current->mm->brk &&
75476+ vma->vm_end >= current->mm->start_brk)
75477+ str1 = "<heap>";
75478+ else
75479+ str1 = "<anonymous mapping>";
75480+ gr_log_middle_varargs(audit, msg, str1);
75481+ break;
75482+ case GR_PSACCT:
75483+ {
75484+ unsigned int wday, cday;
75485+ __u8 whr, chr;
75486+ __u8 wmin, cmin;
75487+ __u8 wsec, csec;
75488+ char cur_tty[64] = { 0 };
75489+ char parent_tty[64] = { 0 };
75490+
75491+ task = va_arg(ap, struct task_struct *);
75492+ wday = va_arg(ap, unsigned int);
75493+ cday = va_arg(ap, unsigned int);
75494+ whr = va_arg(ap, int);
75495+ chr = va_arg(ap, int);
75496+ wmin = va_arg(ap, int);
75497+ cmin = va_arg(ap, int);
75498+ wsec = va_arg(ap, int);
75499+ csec = va_arg(ap, int);
75500+ ulong1 = va_arg(ap, unsigned long);
75501+ cred = __task_cred(task);
75502+ pcred = __task_cred(task->real_parent);
75503+
75504+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
75505+ }
75506+ break;
75507+ default:
75508+ gr_log_middle(audit, msg, ap);
75509+ }
75510+ va_end(ap);
75511+ // these don't need DEFAULTSECARGS printed on the end
75512+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
75513+ gr_log_end(audit, 0);
75514+ else
75515+ gr_log_end(audit, 1);
75516+ END_LOCKS(audit);
75517+}
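gr_log_start() rate-limits alerts with a window-plus-burst scheme: messages are accepted up to FLOODBURST per FLOODTIME-second window, the first message past the burst emits a one-time "logging disabled" notice and restarts the window, and everything after that is dropped until the window expires. The same algorithm as a standalone sketch; the constants are stand-ins for the CONFIG_GRKERNSEC_FLOODTIME/FLOODBURST Kconfig values:

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* stand-in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 6	/* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t wtime;	/* start of the current window */
static unsigned fyet;	/* messages seen in the window */

/* Returns 1 if the message may be logged, 0 if it is flooded away. */
static int may_log(time_t now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* window expired: start a new one */
		fyet = 0;
	} else if (fyet < FLOODBURST) {
		fyet++;			/* still within the burst */
	} else if (fyet == FLOODBURST) {
		wtime = now;		/* one-time notice, then go quiet */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
		return 0;
	} else {
		return 0;		/* already quiet for this window */
	}
	return 1;
}

int main(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, may_log(now) ? "logged" : "dropped");
	return 0;
}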
75518diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
75519new file mode 100644
75520index 0000000..0e39d8c
75521--- /dev/null
75522+++ b/grsecurity/grsec_mem.c
75523@@ -0,0 +1,48 @@
75524+#include <linux/kernel.h>
75525+#include <linux/sched.h>
75526+#include <linux/mm.h>
75527+#include <linux/mman.h>
75528+#include <linux/module.h>
75529+#include <linux/grinternal.h>
75530+
75531+void gr_handle_msr_write(void)
75532+{
75533+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
75534+ return;
75535+}
75536+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
75537+
75538+void
75539+gr_handle_ioperm(void)
75540+{
75541+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
75542+ return;
75543+}
75544+
75545+void
75546+gr_handle_iopl(void)
75547+{
75548+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
75549+ return;
75550+}
75551+
75552+void
75553+gr_handle_mem_readwrite(u64 from, u64 to)
75554+{
75555+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
75556+ return;
75557+}
75558+
75559+void
75560+gr_handle_vm86(void)
75561+{
75562+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
75563+ return;
75564+}
75565+
75566+void
75567+gr_log_badprocpid(const char *entry)
75568+{
75569+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
75570+ return;
75571+}
75572diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
75573new file mode 100644
75574index 0000000..cd9e124
75575--- /dev/null
75576+++ b/grsecurity/grsec_mount.c
75577@@ -0,0 +1,65 @@
75578+#include <linux/kernel.h>
75579+#include <linux/sched.h>
75580+#include <linux/mount.h>
75581+#include <linux/major.h>
75582+#include <linux/grsecurity.h>
75583+#include <linux/grinternal.h>
75584+
75585+void
75586+gr_log_remount(const char *devname, const int retval)
75587+{
75588+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
75589+ if (grsec_enable_mount && (retval >= 0))
75590+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
75591+#endif
75592+ return;
75593+}
75594+
75595+void
75596+gr_log_unmount(const char *devname, const int retval)
75597+{
75598+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
75599+ if (grsec_enable_mount && (retval >= 0))
75600+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
75601+#endif
75602+ return;
75603+}
75604+
75605+void
75606+gr_log_mount(const char *from, const char *to, const int retval)
75607+{
75608+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
75609+ if (grsec_enable_mount && (retval >= 0))
75610+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
75611+#endif
75612+ return;
75613+}
75614+
75615+int
75616+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
75617+{
75618+#ifdef CONFIG_GRKERNSEC_ROFS
75619+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
75620+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
75621+ return -EPERM;
75622+ } else
75623+ return 0;
75624+#endif
75625+ return 0;
75626+}
75627+
75628+int
75629+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
75630+{
75631+#ifdef CONFIG_GRKERNSEC_ROFS
75632+ struct inode *inode = dentry->d_inode;
75633+
75634+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
75635+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
75636+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
75637+ return -EPERM;
75638+ } else
75639+ return 0;
75640+#endif
75641+ return 0;
75642+}
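gr_handle_rofs_blockwrite() blocks write opens of block devices (and raw character devices) once grsec_enable_rofs is set, so that even root cannot bypass read-only mounts by writing to the underlying disk node directly. A sketch of the device test in userspace; the path is illustrative, and RAW_MAJOR is the standard Linux raw device major used by the hook:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define RAW_MAJOR 162	/* Linux raw char device major, as in the hook */

/* Would a write open of this node hit a disk directly? */
static int is_direct_disk_write(const char *path)
{
	struct stat st;

	if (stat(path, &st))
		return 0;
	return S_ISBLK(st.st_mode) ||
	       (S_ISCHR(st.st_mode) && major(st.st_rdev) == RAW_MAJOR);
}

int main(void)
{
	printf("/dev/sda direct=%d\n", is_direct_disk_write("/dev/sda"));
	return 0;
}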
75643diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
75644new file mode 100644
75645index 0000000..6ee9d50
75646--- /dev/null
75647+++ b/grsecurity/grsec_pax.c
75648@@ -0,0 +1,45 @@
75649+#include <linux/kernel.h>
75650+#include <linux/sched.h>
75651+#include <linux/mm.h>
75652+#include <linux/file.h>
75653+#include <linux/grinternal.h>
75654+#include <linux/grsecurity.h>
75655+
75656+void
75657+gr_log_textrel(struct vm_area_struct * vma)
75658+{
75659+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75660+ if (grsec_enable_log_rwxmaps)
75661+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
75662+#endif
75663+ return;
75664+}
75665+
75666+void gr_log_ptgnustack(struct file *file)
75667+{
75668+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75669+ if (grsec_enable_log_rwxmaps)
75670+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
75671+#endif
75672+ return;
75673+}
75674+
75675+void
75676+gr_log_rwxmmap(struct file *file)
75677+{
75678+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75679+ if (grsec_enable_log_rwxmaps)
75680+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
75681+#endif
75682+ return;
75683+}
75684+
75685+void
75686+gr_log_rwxmprotect(struct vm_area_struct *vma)
75687+{
75688+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75689+ if (grsec_enable_log_rwxmaps)
75690+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
75691+#endif
75692+ return;
75693+}
75694diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
75695new file mode 100644
75696index 0000000..2005a3a
75697--- /dev/null
75698+++ b/grsecurity/grsec_proc.c
75699@@ -0,0 +1,20 @@
75700+#include <linux/kernel.h>
75701+#include <linux/sched.h>
75702+#include <linux/grsecurity.h>
75703+#include <linux/grinternal.h>
75704+
75705+int gr_proc_is_restricted(void)
75706+{
75707+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75708+ const struct cred *cred = current_cred();
75709+#endif
75710+
75711+#ifdef CONFIG_GRKERNSEC_PROC_USER
75712+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
75713+ return -EACCES;
75714+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75715+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
75716+ return -EACCES;
75717+#endif
75718+ return 0;
75719+}
75720diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
75721new file mode 100644
75722index 0000000..f7f29aa
75723--- /dev/null
75724+++ b/grsecurity/grsec_ptrace.c
75725@@ -0,0 +1,30 @@
75726+#include <linux/kernel.h>
75727+#include <linux/sched.h>
75728+#include <linux/grinternal.h>
75729+#include <linux/security.h>
75730+
75731+void
75732+gr_audit_ptrace(struct task_struct *task)
75733+{
75734+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
75735+ if (grsec_enable_audit_ptrace)
75736+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
75737+#endif
75738+ return;
75739+}
75740+
75741+int
75742+gr_ptrace_readexec(struct file *file, int unsafe_flags)
75743+{
75744+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
75745+ const struct dentry *dentry = file->f_path.dentry;
75746+ const struct vfsmount *mnt = file->f_path.mnt;
75747+
75748+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
75749+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
75750+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
75751+ return -EACCES;
75752+ }
75753+#endif
75754+ return 0;
75755+}
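gr_ptrace_readexec() closes a disclosure hole: a binary that is executable but not readable (e.g. mode 0711) could otherwise be dumped by ptrace()ing a process across exec. If the tracee was attached with LSM_UNSAFE_PTRACE and the tracer could read the file neither by DAC nor by RBAC policy, the exec is refused. A rough userspace analogue of the DAC half, using faccessat() with AT_EACCESS on a hypothetical path:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* Before letting a traced child exec 'path', verify the tracer could
 * have read the file anyway; otherwise ptrace would leak its contents.
 * DAC check only; the RBAC half of the hook is omitted. */
static int ptrace_readexec_ok(const char *path)
{
	return faccessat(AT_FDCWD, path, R_OK, AT_EACCESS) == 0;
}

int main(void)
{
	const char *target = "/usr/local/bin/secret-tool";	/* hypothetical */

	printf("exec under ptrace %s\n",
	       ptrace_readexec_ok(target) ? "allowed" : "denied");
	return 0;
}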
75756diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
75757new file mode 100644
75758index 0000000..3860c7e
75759--- /dev/null
75760+++ b/grsecurity/grsec_sig.c
75761@@ -0,0 +1,236 @@
75762+#include <linux/kernel.h>
75763+#include <linux/sched.h>
75764+#include <linux/fs.h>
75765+#include <linux/delay.h>
75766+#include <linux/grsecurity.h>
75767+#include <linux/grinternal.h>
75768+#include <linux/hardirq.h>
75769+
75770+char *signames[] = {
75771+ [SIGSEGV] = "Segmentation fault",
75772+ [SIGILL] = "Illegal instruction",
75773+ [SIGABRT] = "Abort",
75774+ [SIGBUS] = "Invalid alignment/Bus error"
75775+};
75776+
75777+void
75778+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
75779+{
75780+#ifdef CONFIG_GRKERNSEC_SIGNAL
75781+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
75782+ (sig == SIGABRT) || (sig == SIGBUS))) {
75783+ if (task_pid_nr(t) == task_pid_nr(current)) {
75784+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
75785+ } else {
75786+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
75787+ }
75788+ }
75789+#endif
75790+ return;
75791+}
75792+
75793+int
75794+gr_handle_signal(const struct task_struct *p, const int sig)
75795+{
75796+#ifdef CONFIG_GRKERNSEC
75797+ /* ignore the 0 signal for protected task checks */
75798+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
75799+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
75800+ return -EPERM;
75801+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
75802+ return -EPERM;
75803+ }
75804+#endif
75805+ return 0;
75806+}
75807+
75808+#ifdef CONFIG_GRKERNSEC
75809+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
75810+
75811+int gr_fake_force_sig(int sig, struct task_struct *t)
75812+{
75813+ unsigned long int flags;
75814+ int ret, blocked, ignored;
75815+ struct k_sigaction *action;
75816+
75817+ spin_lock_irqsave(&t->sighand->siglock, flags);
75818+ action = &t->sighand->action[sig-1];
75819+ ignored = action->sa.sa_handler == SIG_IGN;
75820+ blocked = sigismember(&t->blocked, sig);
75821+ if (blocked || ignored) {
75822+ action->sa.sa_handler = SIG_DFL;
75823+ if (blocked) {
75824+ sigdelset(&t->blocked, sig);
75825+ recalc_sigpending_and_wake(t);
75826+ }
75827+ }
75828+ if (action->sa.sa_handler == SIG_DFL)
75829+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
75830+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
75831+
75832+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
75833+
75834+ return ret;
75835+}
75836+#endif
75837+
75838+#define GR_USER_BAN_TIME (15 * 60)
75839+#define GR_DAEMON_BRUTE_TIME (30 * 60)
75840+
75841+void gr_handle_brute_attach(int dumpable)
75842+{
75843+#ifdef CONFIG_GRKERNSEC_BRUTE
75844+ struct task_struct *p = current;
75845+ kuid_t uid = GLOBAL_ROOT_UID;
75846+ int daemon = 0;
75847+
75848+ if (!grsec_enable_brute)
75849+ return;
75850+
75851+ rcu_read_lock();
75852+ read_lock(&tasklist_lock);
75853+ read_lock(&grsec_exec_file_lock);
75854+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
75855+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
75856+ p->real_parent->brute = 1;
75857+ daemon = 1;
75858+ } else {
75859+ const struct cred *cred = __task_cred(p), *cred2;
75860+ struct task_struct *tsk, *tsk2;
75861+
75862+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
75863+ struct user_struct *user;
75864+
75865+ uid = cred->uid;
75866+
75867+ /* this is put upon execution past expiration */
75868+ user = find_user(uid);
75869+ if (user == NULL)
75870+ goto unlock;
75871+ user->suid_banned = 1;
75872+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
75873+ if (user->suid_ban_expires == ~0UL)
75874+ user->suid_ban_expires--;
75875+
75876+ /* only kill other threads of the same binary, from the same user */
75877+ do_each_thread(tsk2, tsk) {
75878+ cred2 = __task_cred(tsk);
75879+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
75880+ gr_fake_force_sig(SIGKILL, tsk);
75881+ } while_each_thread(tsk2, tsk);
75882+ }
75883+ }
75884+unlock:
75885+ read_unlock(&grsec_exec_file_lock);
75886+ read_unlock(&tasklist_lock);
75887+ rcu_read_unlock();
75888+
75889+ if (gr_is_global_nonroot(uid))
75890+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
75891+ else if (daemon)
75892+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
75893+
75894+#endif
75895+ return;
75896+}
75897+
75898+void gr_handle_brute_check(void)
75899+{
75900+#ifdef CONFIG_GRKERNSEC_BRUTE
75901+ struct task_struct *p = current;
75902+
75903+ if (unlikely(p->brute)) {
75904+ if (!grsec_enable_brute)
75905+ p->brute = 0;
75906+ else if (time_before(get_seconds(), p->brute_expires))
75907+ msleep(30 * 1000);
75908+ }
75909+#endif
75910+ return;
75911+}
75912+
75913+void gr_handle_kernel_exploit(void)
75914+{
75915+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75916+ const struct cred *cred;
75917+ struct task_struct *tsk, *tsk2;
75918+ struct user_struct *user;
75919+ kuid_t uid;
75920+
75921+ if (in_irq() || in_serving_softirq() || in_nmi())
75922+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
75923+
75924+ uid = current_uid();
75925+
75926+ if (gr_is_global_root(uid))
75927+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
75928+ else {
75929+ /* kill all the processes of this user, hold a reference
75930+ to their creds struct, and prevent them from creating
75931+ another process until system reset
75932+ */
75933+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
75934+ GR_GLOBAL_UID(uid));
75935+ /* we intentionally leak this ref */
75936+ user = get_uid(current->cred->user);
75937+ if (user)
75938+ user->kernel_banned = 1;
75939+
75940+ /* kill all processes of this user */
75941+ read_lock(&tasklist_lock);
75942+ do_each_thread(tsk2, tsk) {
75943+ cred = __task_cred(tsk);
75944+ if (uid_eq(cred->uid, uid))
75945+ gr_fake_force_sig(SIGKILL, tsk);
75946+ } while_each_thread(tsk2, tsk);
75947+ read_unlock(&tasklist_lock);
75948+ }
75949+#endif
75950+}
75951+
75952+#ifdef CONFIG_GRKERNSEC_BRUTE
75953+static bool suid_ban_expired(struct user_struct *user)
75954+{
75955+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
75956+ user->suid_banned = 0;
75957+ user->suid_ban_expires = 0;
75958+ free_uid(user);
75959+ return true;
75960+ }
75961+
75962+ return false;
75963+}
75964+#endif
75965+
75966+int gr_process_kernel_exec_ban(void)
75967+{
75968+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75969+ if (unlikely(current->cred->user->kernel_banned))
75970+ return -EPERM;
75971+#endif
75972+ return 0;
75973+}
75974+
75975+int gr_process_kernel_setuid_ban(struct user_struct *user)
75976+{
75977+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
75978+ if (unlikely(user->kernel_banned))
75979+ gr_fake_force_sig(SIGKILL, current);
75980+#endif
75981+ return 0;
75982+}
75983+
75984+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
75985+{
75986+#ifdef CONFIG_GRKERNSEC_BRUTE
75987+ struct user_struct *user = current->cred->user;
75988+ if (unlikely(user->suid_banned)) {
75989+ if (suid_ban_expired(user))
75990+ return 0;
75991+ /* disallow execution of suid binaries only */
75992+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
75993+ return -EPERM;
75994+ }
75995+#endif
75996+ return 0;
75997+}
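gr_handle_brute_attach() temporarily bans a user after a setuid binary crashes under them: the ban carries an absolute expiry (get_seconds() + GR_USER_BAN_TIME, with ~0UL reserved as a "never expires" sentinel), and gr_process_suid_exec_ban() clears it lazily on the next suid exec once the expiry has passed. The expiring-ban bookkeeping reduced to a standalone sketch; the struct is a stand-in for the user_struct fields, and the kernel's user_struct refcounting is omitted:

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)	/* mirrors GR_USER_BAN_TIME */

struct user_ban {
	int banned;
	time_t expires;		/* (time_t)-1 would mean "never expires" */
};

static void ban(struct user_ban *u, time_t now)
{
	u->banned = 1;
	u->expires = now + BAN_TIME;
}

/* Lazy expiry check, as done on the next suid exec attempt. */
static int suid_exec_denied(struct user_ban *u, time_t now)
{
	if (!u->banned)
		return 0;
	if (now >= u->expires) {	/* ban aged out: clear it */
		u->banned = 0;
		u->expires = 0;
		return 0;
	}
	return 1;			/* still banned: refuse suid exec */
}

int main(void)
{
	struct user_ban u = { 0, 0 };
	time_t now = time(NULL);

	ban(&u, now);
	printf("now: denied=%d\n", suid_exec_denied(&u, now));
	printf("16min later: denied=%d\n", suid_exec_denied(&u, now + 16 * 60));
	return 0;
}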
75998diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
75999new file mode 100644
76000index 0000000..c0aef3a
76001--- /dev/null
76002+++ b/grsecurity/grsec_sock.c
76003@@ -0,0 +1,244 @@
76004+#include <linux/kernel.h>
76005+#include <linux/module.h>
76006+#include <linux/sched.h>
76007+#include <linux/file.h>
76008+#include <linux/net.h>
76009+#include <linux/in.h>
76010+#include <linux/ip.h>
76011+#include <net/sock.h>
76012+#include <net/inet_sock.h>
76013+#include <linux/grsecurity.h>
76014+#include <linux/grinternal.h>
76015+#include <linux/gracl.h>
76016+
76017+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
76018+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
76019+
76020+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
76021+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
76022+
76023+#ifdef CONFIG_UNIX_MODULE
76024+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
76025+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
76026+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
76027+EXPORT_SYMBOL_GPL(gr_handle_create);
76028+#endif
76029+
76030+#ifdef CONFIG_GRKERNSEC
76031+#define gr_conn_table_size 32749
76032+struct conn_table_entry {
76033+ struct conn_table_entry *next;
76034+ struct signal_struct *sig;
76035+};
76036+
76037+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
76038+DEFINE_SPINLOCK(gr_conn_table_lock);
76039+
76040+extern const char * gr_socktype_to_name(unsigned char type);
76041+extern const char * gr_proto_to_name(unsigned char proto);
76042+extern const char * gr_sockfamily_to_name(unsigned char family);
76043+
76044+static __inline__ int
76045+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
76046+{
76047+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
76048+}
76049+
76050+static __inline__ int
76051+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
76052+ __u16 sport, __u16 dport)
76053+{
76054+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
76055+ sig->gr_sport == sport && sig->gr_dport == dport))
76056+ return 1;
76057+ else
76058+ return 0;
76059+}
76060+
76061+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
76062+{
76063+ struct conn_table_entry **match;
76064+ unsigned int index;
76065+
76066+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
76067+ sig->gr_sport, sig->gr_dport,
76068+ gr_conn_table_size);
76069+
76070+ newent->sig = sig;
76071+
76072+ match = &gr_conn_table[index];
76073+ newent->next = *match;
76074+ *match = newent;
76075+
76076+ return;
76077+}
76078+
76079+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
76080+{
76081+ struct conn_table_entry *match, *last = NULL;
76082+ unsigned int index;
76083+
76084+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
76085+ sig->gr_sport, sig->gr_dport,
76086+ gr_conn_table_size);
76087+
76088+ match = gr_conn_table[index];
76089+ while (match && !conn_match(match->sig,
76090+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
76091+ sig->gr_dport)) {
76092+ last = match;
76093+ match = match->next;
76094+ }
76095+
76096+ if (match) {
76097+ if (last)
76098+ last->next = match->next;
76099+ else
76100+ gr_conn_table[index] = NULL;
76101+ kfree(match);
76102+ }
76103+
76104+ return;
76105+}
76106+
76107+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
76108+ __u16 sport, __u16 dport)
76109+{
76110+ struct conn_table_entry *match;
76111+ unsigned int index;
76112+
76113+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
76114+
76115+ match = gr_conn_table[index];
76116+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
76117+ match = match->next;
76118+
76119+ if (match)
76120+ return match->sig;
76121+ else
76122+ return NULL;
76123+}
76124+
76125+#endif
76126+
76127+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
76128+{
76129+#ifdef CONFIG_GRKERNSEC
76130+ struct signal_struct *sig = task->signal;
76131+ struct conn_table_entry *newent;
76132+
76133+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
76134+ if (newent == NULL)
76135+ return;
76136+ /* no bh lock needed since we are called with bh disabled */
76137+ spin_lock(&gr_conn_table_lock);
76138+ gr_del_task_from_ip_table_nolock(sig);
76139+ sig->gr_saddr = inet->inet_rcv_saddr;
76140+ sig->gr_daddr = inet->inet_daddr;
76141+ sig->gr_sport = inet->inet_sport;
76142+ sig->gr_dport = inet->inet_dport;
76143+ gr_add_to_task_ip_table_nolock(sig, newent);
76144+ spin_unlock(&gr_conn_table_lock);
76145+#endif
76146+ return;
76147+}
76148+
76149+void gr_del_task_from_ip_table(struct task_struct *task)
76150+{
76151+#ifdef CONFIG_GRKERNSEC
76152+ spin_lock_bh(&gr_conn_table_lock);
76153+ gr_del_task_from_ip_table_nolock(task->signal);
76154+ spin_unlock_bh(&gr_conn_table_lock);
76155+#endif
76156+ return;
76157+}
76158+
76159+void
76160+gr_attach_curr_ip(const struct sock *sk)
76161+{
76162+#ifdef CONFIG_GRKERNSEC
76163+ struct signal_struct *p, *set;
76164+ const struct inet_sock *inet = inet_sk(sk);
76165+
76166+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
76167+ return;
76168+
76169+ set = current->signal;
76170+
76171+ spin_lock_bh(&gr_conn_table_lock);
76172+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
76173+ inet->inet_dport, inet->inet_sport);
76174+ if (unlikely(p != NULL)) {
76175+ set->curr_ip = p->curr_ip;
76176+ set->used_accept = 1;
76177+ gr_del_task_from_ip_table_nolock(p);
76178+ spin_unlock_bh(&gr_conn_table_lock);
76179+ return;
76180+ }
76181+ spin_unlock_bh(&gr_conn_table_lock);
76182+
76183+ set->curr_ip = inet->inet_daddr;
76184+ set->used_accept = 1;
76185+#endif
76186+ return;
76187+}
76188+
76189+int
76190+gr_handle_sock_all(const int family, const int type, const int protocol)
76191+{
76192+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
76193+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
76194+ (family != AF_UNIX)) {
76195+ if (family == AF_INET)
76196+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
76197+ else
76198+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
76199+ return -EACCES;
76200+ }
76201+#endif
76202+ return 0;
76203+}
76204+
76205+int
76206+gr_handle_sock_server(const struct sockaddr *sck)
76207+{
76208+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
76209+ if (grsec_enable_socket_server &&
76210+ in_group_p(grsec_socket_server_gid) &&
76211+ sck && (sck->sa_family != AF_UNIX) &&
76212+ (sck->sa_family != AF_LOCAL)) {
76213+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
76214+ return -EACCES;
76215+ }
76216+#endif
76217+ return 0;
76218+}
76219+
76220+int
76221+gr_handle_sock_server_other(const struct sock *sck)
76222+{
76223+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
76224+ if (grsec_enable_socket_server &&
76225+ in_group_p(grsec_socket_server_gid) &&
76226+ sck && (sck->sk_family != AF_UNIX) &&
76227+ (sck->sk_family != AF_LOCAL)) {
76228+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
76229+ return -EACCES;
76230+ }
76231+#endif
76232+ return 0;
76233+}
76234+
76235+int
76236+gr_handle_sock_client(const struct sockaddr *sck)
76237+{
76238+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
76239+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
76240+ sck && (sck->sa_family != AF_UNIX) &&
76241+ (sck->sa_family != AF_LOCAL)) {
76242+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
76243+ return -EACCES;
76244+ }
76245+#endif
76246+ return 0;
76247+}
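The connection table above maps (saddr, daddr, sport, dport) 4-tuples to the signal_struct of the task that owns the socket; gr_attach_curr_ip() looks the tuple up with the address and port pairs swapped, so a task accept()ing a connection from another local task finds the peer's entry and inherits its curr_ip for logging. The table itself is a fixed-size array of singly linked chains keyed by a simple additive hash. The same structure as a standalone sketch, with userspace types and no locking:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as in gr_conn_table_size */

struct conn {
	struct conn *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	int owner;		/* stand-in for the signal_struct pointer */
};

static struct conn *table[TABLE_SIZE];

/* Same additive hash as conn_hash() in the patch. */
static unsigned hash(uint32_t saddr, uint32_t daddr,
		     uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(struct conn *c)
{
	unsigned i = hash(c->saddr, c->daddr, c->sport, c->dport);

	c->next = table[i];	/* push onto the chain head */
	table[i] = c;
}

static struct conn *lookup(uint32_t saddr, uint32_t daddr,
			   uint16_t sport, uint16_t dport)
{
	struct conn *c = table[hash(saddr, daddr, sport, dport)];

	while (c && !(c->saddr == saddr && c->daddr == daddr &&
		      c->sport == sport && c->dport == dport))
		c = c->next;
	return c;
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));

	c->saddr = 0x0100007f; c->daddr = 0x0200007f;	/* sample values */
	c->sport = 40000; c->dport = 80; c->owner = 42;
	insert(c);
	printf("owner=%d\n", lookup(0x0100007f, 0x0200007f, 40000, 80)->owner);
	return 0;
}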
76248diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
76249new file mode 100644
76250index 0000000..8159888
76251--- /dev/null
76252+++ b/grsecurity/grsec_sysctl.c
76253@@ -0,0 +1,479 @@
76254+#include <linux/kernel.h>
76255+#include <linux/sched.h>
76256+#include <linux/sysctl.h>
76257+#include <linux/grsecurity.h>
76258+#include <linux/grinternal.h>
76259+
76260+int
76261+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
76262+{
76263+#ifdef CONFIG_GRKERNSEC_SYSCTL
76264+ if (dirname == NULL || name == NULL)
76265+ return 0;
76266+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
76267+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
76268+ return -EACCES;
76269+ }
76270+#endif
76271+ return 0;
76272+}
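Each entry in grsecurity_table below becomes a root-only (mode 0600) integer file under /proc/sys/kernel/grsecurity handled by proc_dointvec, and gr_handle_sysctl_mod() above makes the whole directory immutable for writes once grsec_lock is set. Assuming a kernel built with these options, the toggles are flipped like any other sysctl; a minimal sketch (the toggle name is one of the entries below, and root is required):

#include <stdio.h>

/* Flip one of the 0600 grsecurity toggles; the path assumes
 * CONFIG_GRKERNSEC_SYSCTL naming and requires root. Writes fail
 * once kernel/grsecurity/grsec_lock has been set to 1. */
static int set_toggle(const char *name, int val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	return set_toggle("ip_blackhole", 1) ? 1 : 0;
}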
76273+
76274+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
76275+static int __maybe_unused __read_only one = 1;
76276+#endif
76277+
76278+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
76279+ defined(CONFIG_GRKERNSEC_DENYUSB)
76280+struct ctl_table grsecurity_table[] = {
76281+#ifdef CONFIG_GRKERNSEC_SYSCTL
76282+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
76283+#ifdef CONFIG_GRKERNSEC_IO
76284+ {
76285+ .procname = "disable_priv_io",
76286+ .data = &grsec_disable_privio,
76287+ .maxlen = sizeof(int),
76288+ .mode = 0600,
76289+ .proc_handler = &proc_dointvec,
76290+ },
76291+#endif
76292+#endif
76293+#ifdef CONFIG_GRKERNSEC_LINK
76294+ {
76295+ .procname = "linking_restrictions",
76296+ .data = &grsec_enable_link,
76297+ .maxlen = sizeof(int),
76298+ .mode = 0600,
76299+ .proc_handler = &proc_dointvec,
76300+ },
76301+#endif
76302+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
76303+ {
76304+ .procname = "enforce_symlinksifowner",
76305+ .data = &grsec_enable_symlinkown,
76306+ .maxlen = sizeof(int),
76307+ .mode = 0600,
76308+ .proc_handler = &proc_dointvec,
76309+ },
76310+ {
76311+ .procname = "symlinkown_gid",
76312+ .data = &grsec_symlinkown_gid,
76313+ .maxlen = sizeof(int),
76314+ .mode = 0600,
76315+ .proc_handler = &proc_dointvec,
76316+ },
76317+#endif
76318+#ifdef CONFIG_GRKERNSEC_BRUTE
76319+ {
76320+ .procname = "deter_bruteforce",
76321+ .data = &grsec_enable_brute,
76322+ .maxlen = sizeof(int),
76323+ .mode = 0600,
76324+ .proc_handler = &proc_dointvec,
76325+ },
76326+#endif
76327+#ifdef CONFIG_GRKERNSEC_FIFO
76328+ {
76329+ .procname = "fifo_restrictions",
76330+ .data = &grsec_enable_fifo,
76331+ .maxlen = sizeof(int),
76332+ .mode = 0600,
76333+ .proc_handler = &proc_dointvec,
76334+ },
76335+#endif
76336+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
76337+ {
76338+ .procname = "ptrace_readexec",
76339+ .data = &grsec_enable_ptrace_readexec,
76340+ .maxlen = sizeof(int),
76341+ .mode = 0600,
76342+ .proc_handler = &proc_dointvec,
76343+ },
76344+#endif
76345+#ifdef CONFIG_GRKERNSEC_SETXID
76346+ {
76347+ .procname = "consistent_setxid",
76348+ .data = &grsec_enable_setxid,
76349+ .maxlen = sizeof(int),
76350+ .mode = 0600,
76351+ .proc_handler = &proc_dointvec,
76352+ },
76353+#endif
76354+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76355+ {
76356+ .procname = "ip_blackhole",
76357+ .data = &grsec_enable_blackhole,
76358+ .maxlen = sizeof(int),
76359+ .mode = 0600,
76360+ .proc_handler = &proc_dointvec,
76361+ },
76362+ {
76363+ .procname = "lastack_retries",
76364+ .data = &grsec_lastack_retries,
76365+ .maxlen = sizeof(int),
76366+ .mode = 0600,
76367+ .proc_handler = &proc_dointvec,
76368+ },
76369+#endif
76370+#ifdef CONFIG_GRKERNSEC_EXECLOG
76371+ {
76372+ .procname = "exec_logging",
76373+ .data = &grsec_enable_execlog,
76374+ .maxlen = sizeof(int),
76375+ .mode = 0600,
76376+ .proc_handler = &proc_dointvec,
76377+ },
76378+#endif
76379+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
76380+ {
76381+ .procname = "rwxmap_logging",
76382+ .data = &grsec_enable_log_rwxmaps,
76383+ .maxlen = sizeof(int),
76384+ .mode = 0600,
76385+ .proc_handler = &proc_dointvec,
76386+ },
76387+#endif
76388+#ifdef CONFIG_GRKERNSEC_SIGNAL
76389+ {
76390+ .procname = "signal_logging",
76391+ .data = &grsec_enable_signal,
76392+ .maxlen = sizeof(int),
76393+ .mode = 0600,
76394+ .proc_handler = &proc_dointvec,
76395+ },
76396+#endif
76397+#ifdef CONFIG_GRKERNSEC_FORKFAIL
76398+ {
76399+ .procname = "forkfail_logging",
76400+ .data = &grsec_enable_forkfail,
76401+ .maxlen = sizeof(int),
76402+ .mode = 0600,
76403+ .proc_handler = &proc_dointvec,
76404+ },
76405+#endif
76406+#ifdef CONFIG_GRKERNSEC_TIME
76407+ {
76408+ .procname = "timechange_logging",
76409+ .data = &grsec_enable_time,
76410+ .maxlen = sizeof(int),
76411+ .mode = 0600,
76412+ .proc_handler = &proc_dointvec,
76413+ },
76414+#endif
76415+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76416+ {
76417+ .procname = "chroot_deny_shmat",
76418+ .data = &grsec_enable_chroot_shmat,
76419+ .maxlen = sizeof(int),
76420+ .mode = 0600,
76421+ .proc_handler = &proc_dointvec,
76422+ },
76423+#endif
76424+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76425+ {
76426+ .procname = "chroot_deny_unix",
76427+ .data = &grsec_enable_chroot_unix,
76428+ .maxlen = sizeof(int),
76429+ .mode = 0600,
76430+ .proc_handler = &proc_dointvec,
76431+ },
76432+#endif
76433+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76434+ {
76435+ .procname = "chroot_deny_mount",
76436+ .data = &grsec_enable_chroot_mount,
76437+ .maxlen = sizeof(int),
76438+ .mode = 0600,
76439+ .proc_handler = &proc_dointvec,
76440+ },
76441+#endif
76442+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76443+ {
76444+ .procname = "chroot_deny_fchdir",
76445+ .data = &grsec_enable_chroot_fchdir,
76446+ .maxlen = sizeof(int),
76447+ .mode = 0600,
76448+ .proc_handler = &proc_dointvec,
76449+ },
76450+#endif
76451+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76452+ {
76453+ .procname = "chroot_deny_chroot",
76454+ .data = &grsec_enable_chroot_double,
76455+ .maxlen = sizeof(int),
76456+ .mode = 0600,
76457+ .proc_handler = &proc_dointvec,
76458+ },
76459+#endif
76460+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76461+ {
76462+ .procname = "chroot_deny_pivot",
76463+ .data = &grsec_enable_chroot_pivot,
76464+ .maxlen = sizeof(int),
76465+ .mode = 0600,
76466+ .proc_handler = &proc_dointvec,
76467+ },
76468+#endif
76469+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76470+ {
76471+ .procname = "chroot_enforce_chdir",
76472+ .data = &grsec_enable_chroot_chdir,
76473+ .maxlen = sizeof(int),
76474+ .mode = 0600,
76475+ .proc_handler = &proc_dointvec,
76476+ },
76477+#endif
76478+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76479+ {
76480+ .procname = "chroot_deny_chmod",
76481+ .data = &grsec_enable_chroot_chmod,
76482+ .maxlen = sizeof(int),
76483+ .mode = 0600,
76484+ .proc_handler = &proc_dointvec,
76485+ },
76486+#endif
76487+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76488+ {
76489+ .procname = "chroot_deny_mknod",
76490+ .data = &grsec_enable_chroot_mknod,
76491+ .maxlen = sizeof(int),
76492+ .mode = 0600,
76493+ .proc_handler = &proc_dointvec,
76494+ },
76495+#endif
76496+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76497+ {
76498+ .procname = "chroot_restrict_nice",
76499+ .data = &grsec_enable_chroot_nice,
76500+ .maxlen = sizeof(int),
76501+ .mode = 0600,
76502+ .proc_handler = &proc_dointvec,
76503+ },
76504+#endif
76505+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76506+ {
76507+ .procname = "chroot_execlog",
76508+ .data = &grsec_enable_chroot_execlog,
76509+ .maxlen = sizeof(int),
76510+ .mode = 0600,
76511+ .proc_handler = &proc_dointvec,
76512+ },
76513+#endif
76514+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76515+ {
76516+ .procname = "chroot_caps",
76517+ .data = &grsec_enable_chroot_caps,
76518+ .maxlen = sizeof(int),
76519+ .mode = 0600,
76520+ .proc_handler = &proc_dointvec,
76521+ },
76522+#endif
76523+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76524+ {
76525+ .procname = "chroot_deny_sysctl",
76526+ .data = &grsec_enable_chroot_sysctl,
76527+ .maxlen = sizeof(int),
76528+ .mode = 0600,
76529+ .proc_handler = &proc_dointvec,
76530+ },
76531+#endif
76532+#ifdef CONFIG_GRKERNSEC_TPE
76533+ {
76534+ .procname = "tpe",
76535+ .data = &grsec_enable_tpe,
76536+ .maxlen = sizeof(int),
76537+ .mode = 0600,
76538+ .proc_handler = &proc_dointvec,
76539+ },
76540+ {
76541+ .procname = "tpe_gid",
76542+ .data = &grsec_tpe_gid,
76543+ .maxlen = sizeof(int),
76544+ .mode = 0600,
76545+ .proc_handler = &proc_dointvec,
76546+ },
76547+#endif
76548+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
76549+ {
76550+ .procname = "tpe_invert",
76551+ .data = &grsec_enable_tpe_invert,
76552+ .maxlen = sizeof(int),
76553+ .mode = 0600,
76554+ .proc_handler = &proc_dointvec,
76555+ },
76556+#endif
76557+#ifdef CONFIG_GRKERNSEC_TPE_ALL
76558+ {
76559+ .procname = "tpe_restrict_all",
76560+ .data = &grsec_enable_tpe_all,
76561+ .maxlen = sizeof(int),
76562+ .mode = 0600,
76563+ .proc_handler = &proc_dointvec,
76564+ },
76565+#endif
76566+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
76567+ {
76568+ .procname = "socket_all",
76569+ .data = &grsec_enable_socket_all,
76570+ .maxlen = sizeof(int),
76571+ .mode = 0600,
76572+ .proc_handler = &proc_dointvec,
76573+ },
76574+ {
76575+ .procname = "socket_all_gid",
76576+ .data = &grsec_socket_all_gid,
76577+ .maxlen = sizeof(int),
76578+ .mode = 0600,
76579+ .proc_handler = &proc_dointvec,
76580+ },
76581+#endif
76582+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
76583+ {
76584+ .procname = "socket_client",
76585+ .data = &grsec_enable_socket_client,
76586+ .maxlen = sizeof(int),
76587+ .mode = 0600,
76588+ .proc_handler = &proc_dointvec,
76589+ },
76590+ {
76591+ .procname = "socket_client_gid",
76592+ .data = &grsec_socket_client_gid,
76593+ .maxlen = sizeof(int),
76594+ .mode = 0600,
76595+ .proc_handler = &proc_dointvec,
76596+ },
76597+#endif
76598+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
76599+ {
76600+ .procname = "socket_server",
76601+ .data = &grsec_enable_socket_server,
76602+ .maxlen = sizeof(int),
76603+ .mode = 0600,
76604+ .proc_handler = &proc_dointvec,
76605+ },
76606+ {
76607+ .procname = "socket_server_gid",
76608+ .data = &grsec_socket_server_gid,
76609+ .maxlen = sizeof(int),
76610+ .mode = 0600,
76611+ .proc_handler = &proc_dointvec,
76612+ },
76613+#endif
76614+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
76615+ {
76616+ .procname = "audit_group",
76617+ .data = &grsec_enable_group,
76618+ .maxlen = sizeof(int),
76619+ .mode = 0600,
76620+ .proc_handler = &proc_dointvec,
76621+ },
76622+ {
76623+ .procname = "audit_gid",
76624+ .data = &grsec_audit_gid,
76625+ .maxlen = sizeof(int),
76626+ .mode = 0600,
76627+ .proc_handler = &proc_dointvec,
76628+ },
76629+#endif
76630+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76631+ {
76632+ .procname = "audit_chdir",
76633+ .data = &grsec_enable_chdir,
76634+ .maxlen = sizeof(int),
76635+ .mode = 0600,
76636+ .proc_handler = &proc_dointvec,
76637+ },
76638+#endif
76639+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
76640+ {
76641+ .procname = "audit_mount",
76642+ .data = &grsec_enable_mount,
76643+ .maxlen = sizeof(int),
76644+ .mode = 0600,
76645+ .proc_handler = &proc_dointvec,
76646+ },
76647+#endif
76648+#ifdef CONFIG_GRKERNSEC_DMESG
76649+ {
76650+ .procname = "dmesg",
76651+ .data = &grsec_enable_dmesg,
76652+ .maxlen = sizeof(int),
76653+ .mode = 0600,
76654+ .proc_handler = &proc_dointvec,
76655+ },
76656+#endif
76657+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76658+ {
76659+ .procname = "chroot_findtask",
76660+ .data = &grsec_enable_chroot_findtask,
76661+ .maxlen = sizeof(int),
76662+ .mode = 0600,
76663+ .proc_handler = &proc_dointvec,
76664+ },
76665+#endif
76666+#ifdef CONFIG_GRKERNSEC_RESLOG
76667+ {
76668+ .procname = "resource_logging",
76669+ .data = &grsec_resource_logging,
76670+ .maxlen = sizeof(int),
76671+ .mode = 0600,
76672+ .proc_handler = &proc_dointvec,
76673+ },
76674+#endif
76675+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
76676+ {
76677+ .procname = "audit_ptrace",
76678+ .data = &grsec_enable_audit_ptrace,
76679+ .maxlen = sizeof(int),
76680+ .mode = 0600,
76681+ .proc_handler = &proc_dointvec,
76682+ },
76683+#endif
76684+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
76685+ {
76686+ .procname = "harden_ptrace",
76687+ .data = &grsec_enable_harden_ptrace,
76688+ .maxlen = sizeof(int),
76689+ .mode = 0600,
76690+ .proc_handler = &proc_dointvec,
76691+ },
76692+#endif
76693+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
76694+ {
76695+ .procname = "harden_ipc",
76696+ .data = &grsec_enable_harden_ipc,
76697+ .maxlen = sizeof(int),
76698+ .mode = 0600,
76699+ .proc_handler = &proc_dointvec,
76700+ },
76701+#endif
76702+ {
76703+ .procname = "grsec_lock",
76704+ .data = &grsec_lock,
76705+ .maxlen = sizeof(int),
76706+ .mode = 0600,
76707+ .proc_handler = &proc_dointvec,
76708+ },
76709+#endif
76710+#ifdef CONFIG_GRKERNSEC_ROFS
76711+ {
76712+ .procname = "romount_protect",
76713+ .data = &grsec_enable_rofs,
76714+ .maxlen = sizeof(int),
76715+ .mode = 0600,
76716+ .proc_handler = &proc_dointvec_minmax,
76717+ .extra1 = &one,
76718+ .extra2 = &one,
76719+ },
76720+#endif
76721+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
76722+ {
76723+ .procname = "deny_new_usb",
76724+ .data = &grsec_deny_new_usb,
76725+ .maxlen = sizeof(int),
76726+ .mode = 0600,
76727+ .proc_handler = &proc_dointvec,
76728+ },
76729+#endif
76730+ { }
76731+};
76732+#endif
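
Each entry above wires a grsec_enable_* flag to a root-only (mode 0600) integer tunable parsed by proc_dointvec; romount_protect additionally uses proc_dointvec_minmax with extra1 = extra2 = &one, making it a one-way switch, and grsec_lock is the knob used to refuse further changes once set. A minimal userspace sketch of flipping one of these tunables follows; the /proc/sys/kernel/grsecurity path is assumed from grsecurity's documented sysctl layout (the table itself only shows the procnames), and the sketch is not part of the patch.

/* Illustrative sketch, not part of the patch: toggling one of the
 * sysctl entries declared above.  Requires root, since the table
 * registers every entry with mode 0600. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/grsecurity/deny_new_usb", "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* proc_dointvec accepts a plain integer */
	fclose(f);
	return 0;
}
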
76733diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
76734new file mode 100644
76735index 0000000..61b514e
76736--- /dev/null
76737+++ b/grsecurity/grsec_time.c
76738@@ -0,0 +1,16 @@
76739+#include <linux/kernel.h>
76740+#include <linux/sched.h>
76741+#include <linux/grinternal.h>
76742+#include <linux/module.h>
76743+
76744+void
76745+gr_log_timechange(void)
76746+{
76747+#ifdef CONFIG_GRKERNSEC_TIME
76748+ if (grsec_enable_time)
76749+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
76750+#endif
76751+ return;
76752+}
76753+
76754+EXPORT_SYMBOL_GPL(gr_log_timechange);
76755diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
76756new file mode 100644
76757index 0000000..ee57dcf
76758--- /dev/null
76759+++ b/grsecurity/grsec_tpe.c
76760@@ -0,0 +1,73 @@
76761+#include <linux/kernel.h>
76762+#include <linux/sched.h>
76763+#include <linux/file.h>
76764+#include <linux/fs.h>
76765+#include <linux/grinternal.h>
76766+
76767+extern int gr_acl_tpe_check(void);
76768+
76769+int
76770+gr_tpe_allow(const struct file *file)
76771+{
76772+#ifdef CONFIG_GRKERNSEC
76773+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
76774+ const struct cred *cred = current_cred();
76775+ char *msg = NULL;
76776+ char *msg2 = NULL;
76777+
76778+ // never restrict root
76779+ if (gr_is_global_root(cred->uid))
76780+ return 1;
76781+
76782+ if (grsec_enable_tpe) {
76783+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
76784+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
76785+ msg = "not being in trusted group";
76786+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
76787+ msg = "being in untrusted group";
76788+#else
76789+ if (in_group_p(grsec_tpe_gid))
76790+ msg = "being in untrusted group";
76791+#endif
76792+ }
76793+ if (!msg && gr_acl_tpe_check())
76794+ msg = "being in untrusted role";
76795+
76796+ // not in any affected group/role
76797+ if (!msg)
76798+ goto next_check;
76799+
76800+ if (gr_is_global_nonroot(inode->i_uid))
76801+ msg2 = "file in non-root-owned directory";
76802+ else if (inode->i_mode & S_IWOTH)
76803+ msg2 = "file in world-writable directory";
76804+ else if (inode->i_mode & S_IWGRP)
76805+ msg2 = "file in group-writable directory";
76806+
76807+ if (msg && msg2) {
76808+ char fullmsg[70] = {0};
76809+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
76810+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
76811+ return 0;
76812+ }
76813+ msg = NULL;
76814+next_check:
76815+#ifdef CONFIG_GRKERNSEC_TPE_ALL
76816+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
76817+ return 1;
76818+
76819+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
76820+ msg = "directory not owned by user";
76821+ else if (inode->i_mode & S_IWOTH)
76822+ msg = "file in world-writable directory";
76823+ else if (inode->i_mode & S_IWGRP)
76824+ msg = "file in group-writable directory";
76825+
76826+ if (msg) {
76827+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
76828+ return 0;
76829+ }
76830+#endif
76831+#endif
76832+ return 1;
76833+}
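
gr_tpe_allow() above implements trusted path execution in two stages: first, if the caller is in the untrusted group (or outside the trusted group under TPE_INVERT, or in an untrusted RBAC role), execution is denied when the binary's parent directory is non-root-owned, group-writable, or world-writable; second, under TPE_ALL the directory test applies to every non-root user and additionally requires the directory to be owned by that user. A userspace analogue of the directory test is sketched below, mirroring the order of the kernel checks with stat(2) on an arbitrary example path; it is not part of the patch.

/* Illustrative sketch, not part of the patch: the parent-directory
 * test from gr_tpe_allow(), redone with stat(2) in userspace. */
#include <stdio.h>
#include <sys/stat.h>

static const char *tpe_dir_problem(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0)
		return "stat failed";
	if (st.st_uid != 0)
		return "file in non-root-owned directory";
	if (st.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (st.st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;	/* directory passes; exec would be allowed */
}

int main(void)
{
	const char *msg = tpe_dir_problem("/tmp");
	printf("%s\n", msg ? msg : "ok");
	return 0;
}
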
76834diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
76835new file mode 100644
76836index 0000000..ae02d8e
76837--- /dev/null
76838+++ b/grsecurity/grsec_usb.c
76839@@ -0,0 +1,15 @@
76840+#include <linux/kernel.h>
76841+#include <linux/grinternal.h>
76842+#include <linux/module.h>
76843+
76844+int gr_handle_new_usb(void)
76845+{
76846+#ifdef CONFIG_GRKERNSEC_DENYUSB
76847+ if (grsec_deny_new_usb) {
76848+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
76849+ return 1;
76850+ }
76851+#endif
76852+ return 0;
76853+}
76854+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
76855diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
76856new file mode 100644
76857index 0000000..9f7b1ac
76858--- /dev/null
76859+++ b/grsecurity/grsum.c
76860@@ -0,0 +1,61 @@
76861+#include <linux/err.h>
76862+#include <linux/kernel.h>
76863+#include <linux/sched.h>
76864+#include <linux/mm.h>
76865+#include <linux/scatterlist.h>
76866+#include <linux/crypto.h>
76867+#include <linux/gracl.h>
76868+
76869+
76870+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
76871+#error "crypto and sha256 must be built into the kernel"
76872+#endif
76873+
76874+int
76875+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
76876+{
76877+ char *p;
76878+ struct crypto_hash *tfm;
76879+ struct hash_desc desc;
76880+ struct scatterlist sg;
76881+ unsigned char temp_sum[GR_SHA_LEN];
76882+ volatile int retval = 0;
76883+ volatile int dummy = 0;
76884+ unsigned int i;
76885+
76886+ sg_init_table(&sg, 1);
76887+
76888+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
76889+ if (IS_ERR(tfm)) {
76890+ /* should never happen, since sha256 should be built in */
76891+ return 1;
76892+ }
76893+
76894+ desc.tfm = tfm;
76895+ desc.flags = 0;
76896+
76897+ crypto_hash_init(&desc);
76898+
76899+ p = salt;
76900+ sg_set_buf(&sg, p, GR_SALT_LEN);
76901+ crypto_hash_update(&desc, &sg, sg.length);
76902+
76903+ p = entry->pw;
76904+ sg_set_buf(&sg, p, strlen(p));
76905+
76906+ crypto_hash_update(&desc, &sg, sg.length);
76907+
76908+ crypto_hash_final(&desc, temp_sum);
76909+
76910+ memset(entry->pw, 0, GR_PW_LEN);
76911+
76912+ for (i = 0; i < GR_SHA_LEN; i++)
76913+ if (sum[i] != temp_sum[i])
76914+ retval = 1;
76915+ else
76916+ dummy = 1; // waste a cycle
76917+
76918+ crypto_free_hash(tfm);
76919+
76920+ return retval;
76921+}
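
The comparison loop in chkpw() above is written to take the same time whether the password is right or wrong: all GR_SHA_LEN bytes are always examined, and the volatile retval/dummy pair keeps the compiler from short-circuiting on the first mismatch, so response timing does not reveal how many leading digest bytes matched. The same idea in its common branch-free form is sketched below; the names are mine, not part of the patch.

/* Illustrative sketch, not part of the patch: constant-time
 * comparison as in chkpw(), written branch-free.  Every byte is
 * read regardless of where the buffers first differ. */
#include <stddef.h>

int const_time_neq(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* accumulate all differences */

	return diff != 0;		/* nonzero iff any byte differed */
}
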
76922diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
76923index 77ff547..181834f 100644
76924--- a/include/asm-generic/4level-fixup.h
76925+++ b/include/asm-generic/4level-fixup.h
76926@@ -13,8 +13,10 @@
76927 #define pmd_alloc(mm, pud, address) \
76928 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
76929 NULL: pmd_offset(pud, address))
76930+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
76931
76932 #define pud_alloc(mm, pgd, address) (pgd)
76933+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
76934 #define pud_offset(pgd, start) (pgd)
76935 #define pud_none(pud) 0
76936 #define pud_bad(pud) 0
76937diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
76938index b7babf0..97f4c4f 100644
76939--- a/include/asm-generic/atomic-long.h
76940+++ b/include/asm-generic/atomic-long.h
76941@@ -22,6 +22,12 @@
76942
76943 typedef atomic64_t atomic_long_t;
76944
76945+#ifdef CONFIG_PAX_REFCOUNT
76946+typedef atomic64_unchecked_t atomic_long_unchecked_t;
76947+#else
76948+typedef atomic64_t atomic_long_unchecked_t;
76949+#endif
76950+
76951 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
76952
76953 static inline long atomic_long_read(atomic_long_t *l)
76954@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
76955 return (long)atomic64_read(v);
76956 }
76957
76958+#ifdef CONFIG_PAX_REFCOUNT
76959+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
76960+{
76961+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76962+
76963+ return (long)atomic64_read_unchecked(v);
76964+}
76965+#endif
76966+
76967 static inline void atomic_long_set(atomic_long_t *l, long i)
76968 {
76969 atomic64_t *v = (atomic64_t *)l;
76970@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
76971 atomic64_set(v, i);
76972 }
76973
76974+#ifdef CONFIG_PAX_REFCOUNT
76975+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
76976+{
76977+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76978+
76979+ atomic64_set_unchecked(v, i);
76980+}
76981+#endif
76982+
76983 static inline void atomic_long_inc(atomic_long_t *l)
76984 {
76985 atomic64_t *v = (atomic64_t *)l;
76986@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
76987 atomic64_inc(v);
76988 }
76989
76990+#ifdef CONFIG_PAX_REFCOUNT
76991+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
76992+{
76993+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76994+
76995+ atomic64_inc_unchecked(v);
76996+}
76997+#endif
76998+
76999 static inline void atomic_long_dec(atomic_long_t *l)
77000 {
77001 atomic64_t *v = (atomic64_t *)l;
77002@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
77003 atomic64_dec(v);
77004 }
77005
77006+#ifdef CONFIG_PAX_REFCOUNT
77007+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
77008+{
77009+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
77010+
77011+ atomic64_dec_unchecked(v);
77012+}
77013+#endif
77014+
77015 static inline void atomic_long_add(long i, atomic_long_t *l)
77016 {
77017 atomic64_t *v = (atomic64_t *)l;
77018@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
77019 atomic64_add(i, v);
77020 }
77021
77022+#ifdef CONFIG_PAX_REFCOUNT
77023+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
77024+{
77025+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
77026+
77027+ atomic64_add_unchecked(i, v);
77028+}
77029+#endif
77030+
77031 static inline void atomic_long_sub(long i, atomic_long_t *l)
77032 {
77033 atomic64_t *v = (atomic64_t *)l;
77034@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
77035 atomic64_sub(i, v);
77036 }
77037
77038+#ifdef CONFIG_PAX_REFCOUNT
77039+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
77040+{
77041+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
77042+
77043+ atomic64_sub_unchecked(i, v);
77044+}
77045+#endif
77046+
77047 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
77048 {
77049 atomic64_t *v = (atomic64_t *)l;
77050@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
77051 return atomic64_add_negative(i, v);
77052 }
77053
77054-static inline long atomic_long_add_return(long i, atomic_long_t *l)
77055+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
77056 {
77057 atomic64_t *v = (atomic64_t *)l;
77058
77059 return (long)atomic64_add_return(i, v);
77060 }
77061
77062+#ifdef CONFIG_PAX_REFCOUNT
77063+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
77064+{
77065+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
77066+
77067+ return (long)atomic64_add_return_unchecked(i, v);
77068+}
77069+#endif
77070+
77071 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
77072 {
77073 atomic64_t *v = (atomic64_t *)l;
77074@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
77075 return (long)atomic64_inc_return(v);
77076 }
77077
77078+#ifdef CONFIG_PAX_REFCOUNT
77079+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
77080+{
77081+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
77082+
77083+ return (long)atomic64_inc_return_unchecked(v);
77084+}
77085+#endif
77086+
77087 static inline long atomic_long_dec_return(atomic_long_t *l)
77088 {
77089 atomic64_t *v = (atomic64_t *)l;
77090@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
77091
77092 typedef atomic_t atomic_long_t;
77093
77094+#ifdef CONFIG_PAX_REFCOUNT
77095+typedef atomic_unchecked_t atomic_long_unchecked_t;
77096+#else
77097+typedef atomic_t atomic_long_unchecked_t;
77098+#endif
77099+
77100 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
77101 static inline long atomic_long_read(atomic_long_t *l)
77102 {
77103@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
77104 return (long)atomic_read(v);
77105 }
77106
77107+#ifdef CONFIG_PAX_REFCOUNT
77108+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
77109+{
77110+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77111+
77112+ return (long)atomic_read_unchecked(v);
77113+}
77114+#endif
77115+
77116 static inline void atomic_long_set(atomic_long_t *l, long i)
77117 {
77118 atomic_t *v = (atomic_t *)l;
77119@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
77120 atomic_set(v, i);
77121 }
77122
77123+#ifdef CONFIG_PAX_REFCOUNT
77124+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
77125+{
77126+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77127+
77128+ atomic_set_unchecked(v, i);
77129+}
77130+#endif
77131+
77132 static inline void atomic_long_inc(atomic_long_t *l)
77133 {
77134 atomic_t *v = (atomic_t *)l;
77135@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
77136 atomic_inc(v);
77137 }
77138
77139+#ifdef CONFIG_PAX_REFCOUNT
77140+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
77141+{
77142+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77143+
77144+ atomic_inc_unchecked(v);
77145+}
77146+#endif
77147+
77148 static inline void atomic_long_dec(atomic_long_t *l)
77149 {
77150 atomic_t *v = (atomic_t *)l;
77151@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
77152 atomic_dec(v);
77153 }
77154
77155+#ifdef CONFIG_PAX_REFCOUNT
77156+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
77157+{
77158+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77159+
77160+ atomic_dec_unchecked(v);
77161+}
77162+#endif
77163+
77164 static inline void atomic_long_add(long i, atomic_long_t *l)
77165 {
77166 atomic_t *v = (atomic_t *)l;
77167@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
77168 atomic_add(i, v);
77169 }
77170
77171+#ifdef CONFIG_PAX_REFCOUNT
77172+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
77173+{
77174+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77175+
77176+ atomic_add_unchecked(i, v);
77177+}
77178+#endif
77179+
77180 static inline void atomic_long_sub(long i, atomic_long_t *l)
77181 {
77182 atomic_t *v = (atomic_t *)l;
77183@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
77184 atomic_sub(i, v);
77185 }
77186
77187+#ifdef CONFIG_PAX_REFCOUNT
77188+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
77189+{
77190+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77191+
77192+ atomic_sub_unchecked(i, v);
77193+}
77194+#endif
77195+
77196 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
77197 {
77198 atomic_t *v = (atomic_t *)l;
77199@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
77200 return (long)atomic_add_return(i, v);
77201 }
77202
77203+#ifdef CONFIG_PAX_REFCOUNT
77204+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
77205+{
77206+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77207+
77208+ return (long)atomic_add_return_unchecked(i, v);
77209+}
77210+
77211+#endif
77212+
77213 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
77214 {
77215 atomic_t *v = (atomic_t *)l;
77216@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
77217 return (long)atomic_inc_return(v);
77218 }
77219
77220+#ifdef CONFIG_PAX_REFCOUNT
77221+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
77222+{
77223+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
77224+
77225+ return (long)atomic_inc_return_unchecked(v);
77226+}
77227+#endif
77228+
77229 static inline long atomic_long_dec_return(atomic_long_t *l)
77230 {
77231 atomic_t *v = (atomic_t *)l;
77232@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
77233
77234 #endif /* BITS_PER_LONG == 64 */
77235
77236+#ifdef CONFIG_PAX_REFCOUNT
77237+static inline void pax_refcount_needs_these_functions(void)
77238+{
77239+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
77240+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
77241+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
77242+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
77243+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
77244+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
77245+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
77246+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
77247+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
77248+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
77249+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
77250+#ifdef CONFIG_X86
77251+ atomic_clear_mask_unchecked(0, NULL);
77252+ atomic_set_mask_unchecked(0, NULL);
77253+#endif
77254+
77255+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
77256+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
77257+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
77258+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
77259+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
77260+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
77261+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
77262+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
77263+}
77264+#else
77265+#define atomic_read_unchecked(v) atomic_read(v)
77266+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
77267+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
77268+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
77269+#define atomic_inc_unchecked(v) atomic_inc(v)
77270+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
77271+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
77272+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
77273+#define atomic_dec_unchecked(v) atomic_dec(v)
77274+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
77275+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
77276+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
77277+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
77278+
77279+#define atomic_long_read_unchecked(v) atomic_long_read(v)
77280+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
77281+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
77282+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
77283+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
77284+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
77285+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
77286+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
77287+#endif
77288+
77289 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
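
The pattern throughout this header: under CONFIG_PAX_REFCOUNT the regular atomic_long_t operations are overflow-checked, so counters that are allowed to wrap must move to the *_unchecked types; when the option is off, the fallback #defines above make the unchecked names plain aliases, and pax_refcount_needs_these_functions() merely forces every wrapper to be instantiated. A kernel-context sketch of converting such a counter, with made-up names, is below; it assumes this header is already in scope and is not part of the patch.

/* Illustrative sketch, not part of the patch: a counter where
 * wraparound is harmless, moved to the _unchecked API above so it
 * does not trip PAX_REFCOUNT's overflow detection. */
struct xmit_stats {
	atomic_long_unchecked_t packets;	/* may wrap; that's fine */
};

static inline void xmit_stats_account(struct xmit_stats *s)
{
	/* atomic_long_inc() here would trap on overflow under PAX_REFCOUNT */
	atomic_long_inc_unchecked(&s->packets);
}
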
77290diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
77291index 33bd2de..f31bff97 100644
77292--- a/include/asm-generic/atomic.h
77293+++ b/include/asm-generic/atomic.h
77294@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
77295 * Atomically clears the bits set in @mask from @v
77296 */
77297 #ifndef atomic_clear_mask
77298-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
77299+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
77300 {
77301 unsigned long flags;
77302
77303diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
77304index b18ce4f..2ee2843 100644
77305--- a/include/asm-generic/atomic64.h
77306+++ b/include/asm-generic/atomic64.h
77307@@ -16,6 +16,8 @@ typedef struct {
77308 long long counter;
77309 } atomic64_t;
77310
77311+typedef atomic64_t atomic64_unchecked_t;
77312+
77313 #define ATOMIC64_INIT(i) { (i) }
77314
77315 extern long long atomic64_read(const atomic64_t *v);
77316@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
77317 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
77318 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
77319
77320+#define atomic64_read_unchecked(v) atomic64_read(v)
77321+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
77322+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
77323+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
77324+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
77325+#define atomic64_inc_unchecked(v) atomic64_inc(v)
77326+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
77327+#define atomic64_dec_unchecked(v) atomic64_dec(v)
77328+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
77329+
77330 #endif /* _ASM_GENERIC_ATOMIC64_H */
77331diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
77332index 6f692f8..2ad9dd2 100644
77333--- a/include/asm-generic/barrier.h
77334+++ b/include/asm-generic/barrier.h
77335@@ -66,7 +66,7 @@
77336 do { \
77337 compiletime_assert_atomic_type(*p); \
77338 smp_mb(); \
77339- ACCESS_ONCE(*p) = (v); \
77340+ ACCESS_ONCE_RW(*p) = (v); \
77341 } while (0)
77342
77343 #define smp_load_acquire(p) \
77344diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
77345index a60a7cc..0fe12f2 100644
77346--- a/include/asm-generic/bitops/__fls.h
77347+++ b/include/asm-generic/bitops/__fls.h
77348@@ -9,7 +9,7 @@
77349 *
77350 * Undefined if no set bit exists, so code should check against 0 first.
77351 */
77352-static __always_inline unsigned long __fls(unsigned long word)
77353+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
77354 {
77355 int num = BITS_PER_LONG - 1;
77356
77357diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
77358index 0576d1f..dad6c71 100644
77359--- a/include/asm-generic/bitops/fls.h
77360+++ b/include/asm-generic/bitops/fls.h
77361@@ -9,7 +9,7 @@
77362 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
77363 */
77364
77365-static __always_inline int fls(int x)
77366+static __always_inline int __intentional_overflow(-1) fls(int x)
77367 {
77368 int r = 32;
77369
77370diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
77371index b097cf8..3d40e14 100644
77372--- a/include/asm-generic/bitops/fls64.h
77373+++ b/include/asm-generic/bitops/fls64.h
77374@@ -15,7 +15,7 @@
77375 * at position 64.
77376 */
77377 #if BITS_PER_LONG == 32
77378-static __always_inline int fls64(__u64 x)
77379+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
77380 {
77381 __u32 h = x >> 32;
77382 if (h)
77383@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
77384 return fls(x);
77385 }
77386 #elif BITS_PER_LONG == 64
77387-static __always_inline int fls64(__u64 x)
77388+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
77389 {
77390 if (x == 0)
77391 return 0;
77392diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
77393index 1bfcfe5..e04c5c9 100644
77394--- a/include/asm-generic/cache.h
77395+++ b/include/asm-generic/cache.h
77396@@ -6,7 +6,7 @@
77397 * cache lines need to provide their own cache.h.
77398 */
77399
77400-#define L1_CACHE_SHIFT 5
77401-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
77402+#define L1_CACHE_SHIFT 5UL
77403+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
77404
77405 #endif /* __ASM_GENERIC_CACHE_H */
77406diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
77407index 0d68a1e..b74a761 100644
77408--- a/include/asm-generic/emergency-restart.h
77409+++ b/include/asm-generic/emergency-restart.h
77410@@ -1,7 +1,7 @@
77411 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
77412 #define _ASM_GENERIC_EMERGENCY_RESTART_H
77413
77414-static inline void machine_emergency_restart(void)
77415+static inline __noreturn void machine_emergency_restart(void)
77416 {
77417 machine_restart(NULL);
77418 }
77419diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
77420index 90f99c7..00ce236 100644
77421--- a/include/asm-generic/kmap_types.h
77422+++ b/include/asm-generic/kmap_types.h
77423@@ -2,9 +2,9 @@
77424 #define _ASM_GENERIC_KMAP_TYPES_H
77425
77426 #ifdef __WITH_KM_FENCE
77427-# define KM_TYPE_NR 41
77428+# define KM_TYPE_NR 42
77429 #else
77430-# define KM_TYPE_NR 20
77431+# define KM_TYPE_NR 21
77432 #endif
77433
77434 #endif
77435diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
77436index 9ceb03b..62b0b8f 100644
77437--- a/include/asm-generic/local.h
77438+++ b/include/asm-generic/local.h
77439@@ -23,24 +23,37 @@ typedef struct
77440 atomic_long_t a;
77441 } local_t;
77442
77443+typedef struct {
77444+ atomic_long_unchecked_t a;
77445+} local_unchecked_t;
77446+
77447 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
77448
77449 #define local_read(l) atomic_long_read(&(l)->a)
77450+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
77451 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
77452+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
77453 #define local_inc(l) atomic_long_inc(&(l)->a)
77454+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
77455 #define local_dec(l) atomic_long_dec(&(l)->a)
77456+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
77457 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
77458+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
77459 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
77460+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
77461
77462 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
77463 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
77464 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
77465 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
77466 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
77467+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
77468 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
77469 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
77470+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
77471
77472 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
77473+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
77474 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
77475 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
77476 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
77477diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
77478index 725612b..9cc513a 100644
77479--- a/include/asm-generic/pgtable-nopmd.h
77480+++ b/include/asm-generic/pgtable-nopmd.h
77481@@ -1,14 +1,19 @@
77482 #ifndef _PGTABLE_NOPMD_H
77483 #define _PGTABLE_NOPMD_H
77484
77485-#ifndef __ASSEMBLY__
77486-
77487 #include <asm-generic/pgtable-nopud.h>
77488
77489-struct mm_struct;
77490-
77491 #define __PAGETABLE_PMD_FOLDED
77492
77493+#define PMD_SHIFT PUD_SHIFT
77494+#define PTRS_PER_PMD 1
77495+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
77496+#define PMD_MASK (~(PMD_SIZE-1))
77497+
77498+#ifndef __ASSEMBLY__
77499+
77500+struct mm_struct;
77501+
77502 /*
77503 * Having the pmd type consist of a pud gets the size right, and allows
77504 * us to conceptually access the pud entry that this pmd is folded into
77505@@ -16,11 +21,6 @@ struct mm_struct;
77506 */
77507 typedef struct { pud_t pud; } pmd_t;
77508
77509-#define PMD_SHIFT PUD_SHIFT
77510-#define PTRS_PER_PMD 1
77511-#define PMD_SIZE (1UL << PMD_SHIFT)
77512-#define PMD_MASK (~(PMD_SIZE-1))
77513-
77514 /*
77515 * The "pud_xxx()" functions here are trivial for a folded two-level
77516 * setup: the pmd is never bad, and a pmd always exists (as it's folded
77517diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
77518index 810431d..0ec4804f 100644
77519--- a/include/asm-generic/pgtable-nopud.h
77520+++ b/include/asm-generic/pgtable-nopud.h
77521@@ -1,10 +1,15 @@
77522 #ifndef _PGTABLE_NOPUD_H
77523 #define _PGTABLE_NOPUD_H
77524
77525-#ifndef __ASSEMBLY__
77526-
77527 #define __PAGETABLE_PUD_FOLDED
77528
77529+#define PUD_SHIFT PGDIR_SHIFT
77530+#define PTRS_PER_PUD 1
77531+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
77532+#define PUD_MASK (~(PUD_SIZE-1))
77533+
77534+#ifndef __ASSEMBLY__
77535+
77536 /*
77537 * Having the pud type consist of a pgd gets the size right, and allows
77538 * us to conceptually access the pgd entry that this pud is folded into
77539@@ -12,11 +17,6 @@
77540 */
77541 typedef struct { pgd_t pgd; } pud_t;
77542
77543-#define PUD_SHIFT PGDIR_SHIFT
77544-#define PTRS_PER_PUD 1
77545-#define PUD_SIZE (1UL << PUD_SHIFT)
77546-#define PUD_MASK (~(PUD_SIZE-1))
77547-
77548 /*
77549 * The "pgd_xxx()" functions here are trivial for a folded two-level
77550 * setup: the pud is never bad, and a pud always exists (as it's folded
77551@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
77552 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
77553
77554 #define pgd_populate(mm, pgd, pud) do { } while (0)
77555+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
77556 /*
77557 * (puds are folded into pgds so this doesn't get actually called,
77558 * but the define is needed for a generic inline function.)
77559diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
77560index 38a7437..47f62a4 100644
77561--- a/include/asm-generic/pgtable.h
77562+++ b/include/asm-generic/pgtable.h
77563@@ -802,6 +802,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
77564 }
77565 #endif /* CONFIG_NUMA_BALANCING */
77566
77567+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
77568+#ifdef CONFIG_PAX_KERNEXEC
77569+#error KERNEXEC requires pax_open_kernel
77570+#else
77571+static inline unsigned long pax_open_kernel(void) { return 0; }
77572+#endif
77573+#endif
77574+
77575+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
77576+#ifdef CONFIG_PAX_KERNEXEC
77577+#error KERNEXEC requires pax_close_kernel
77578+#else
77579+static inline unsigned long pax_close_kernel(void) { return 0; }
77580+#endif
77581+#endif
77582+
77583 #endif /* CONFIG_MMU */
77584
77585 #endif /* !__ASSEMBLY__ */
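
pax_open_kernel()/pax_close_kernel() bracket writes to data that KERNEXEC keeps read-only: architectures implementing KERNEXEC provide real versions (the #error above catches a KERNEXEC configuration without them), while everyone else gets these no-op stubs; the uaccess.h hunk below adds the analogous pax_open_userland()/pax_close_userland() pair for UDEREF. A sketch of the typical use, with an invented variable and the __read_only marker that the cache.h hunk later in this patch introduces; not part of the patch.

/* Illustrative sketch, not part of the patch: writing to a variable
 * that KERNEXEC protects.  'setting' and update_setting() are
 * invented for illustration; __read_only is defined in cache.h below. */
static int setting __read_only;

static void update_setting(int v)
{
	pax_open_kernel();	/* temporarily permit kernel writes */
	setting = v;
	pax_close_kernel();	/* restore read-only protection */
}
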
77586diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
77587index 72d8803..cb9749c 100644
77588--- a/include/asm-generic/uaccess.h
77589+++ b/include/asm-generic/uaccess.h
77590@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
77591 return __clear_user(to, n);
77592 }
77593
77594+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
77595+#ifdef CONFIG_PAX_MEMORY_UDEREF
77596+#error UDEREF requires pax_open_userland
77597+#else
77598+static inline unsigned long pax_open_userland(void) { return 0; }
77599+#endif
77600+#endif
77601+
77602+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
77603+#ifdef CONFIG_PAX_MEMORY_UDEREF
77604+#error UDEREF requires pax_close_userland
77605+#else
77606+static inline unsigned long pax_close_userland(void) { return 0; }
77607+#endif
77608+#endif
77609+
77610 #endif /* __ASM_GENERIC_UACCESS_H */
77611diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
77612index bc2121f..2f41f9a 100644
77613--- a/include/asm-generic/vmlinux.lds.h
77614+++ b/include/asm-generic/vmlinux.lds.h
77615@@ -232,6 +232,7 @@
77616 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
77617 VMLINUX_SYMBOL(__start_rodata) = .; \
77618 *(.rodata) *(.rodata.*) \
77619+ *(.data..read_only) \
77620 *(__vermagic) /* Kernel version magic */ \
77621 . = ALIGN(8); \
77622 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
77623@@ -716,17 +717,18 @@
77624 * section in the linker script will go there too. @phdr should have
77625 * a leading colon.
77626 *
77627- * Note that this macros defines __per_cpu_load as an absolute symbol.
77628+ * Note that this macros defines per_cpu_load as an absolute symbol.
77629 * If there is no need to put the percpu section at a predetermined
77630 * address, use PERCPU_SECTION.
77631 */
77632 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
77633- VMLINUX_SYMBOL(__per_cpu_load) = .; \
77634- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
77635+ per_cpu_load = .; \
77636+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
77637 - LOAD_OFFSET) { \
77638+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
77639 PERCPU_INPUT(cacheline) \
77640 } phdr \
77641- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
77642+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
77643
77644 /**
77645 * PERCPU_SECTION - define output section for percpu area, simple version
77646diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
77647index e73c19e..5b89e00 100644
77648--- a/include/crypto/algapi.h
77649+++ b/include/crypto/algapi.h
77650@@ -34,7 +34,7 @@ struct crypto_type {
77651 unsigned int maskclear;
77652 unsigned int maskset;
77653 unsigned int tfmsize;
77654-};
77655+} __do_const;
77656
77657 struct crypto_instance {
77658 struct crypto_alg alg;
77659diff --git a/include/drm/drmP.h b/include/drm/drmP.h
77660index 04a7f31..668d424 100644
77661--- a/include/drm/drmP.h
77662+++ b/include/drm/drmP.h
77663@@ -67,6 +67,7 @@
77664 #include <linux/workqueue.h>
77665 #include <linux/poll.h>
77666 #include <asm/pgalloc.h>
77667+#include <asm/local.h>
77668 #include <drm/drm.h>
77669 #include <drm/drm_sarea.h>
77670 #include <drm/drm_vma_manager.h>
77671@@ -297,10 +298,12 @@ do { \
77672 * \param cmd command.
77673 * \param arg argument.
77674 */
77675-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
77676+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
77677+ struct drm_file *file_priv);
77678+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
77679 struct drm_file *file_priv);
77680
77681-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
77682+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
77683 unsigned long arg);
77684
77685 #define DRM_IOCTL_NR(n) _IOC_NR(n)
77686@@ -316,10 +319,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
77687 struct drm_ioctl_desc {
77688 unsigned int cmd;
77689 int flags;
77690- drm_ioctl_t *func;
77691+ drm_ioctl_t func;
77692 unsigned int cmd_drv;
77693 const char *name;
77694-};
77695+} __do_const;
77696
77697 /**
77698 * Creates a driver or general drm_ioctl_desc array entry for the given
77699@@ -1022,7 +1025,8 @@ struct drm_info_list {
77700 int (*show)(struct seq_file*, void*); /** show callback */
77701 u32 driver_features; /**< Required driver features for this entry */
77702 void *data;
77703-};
77704+} __do_const;
77705+typedef struct drm_info_list __no_const drm_info_list_no_const;
77706
77707 /**
77708 * debugfs node structure. This structure represents a debugfs file.
77709@@ -1106,7 +1110,7 @@ struct drm_device {
77710
77711 /** \name Usage Counters */
77712 /*@{ */
77713- int open_count; /**< Outstanding files open */
77714+ local_t open_count; /**< Outstanding files open */
77715 int buf_use; /**< Buffers in use -- cannot alloc */
77716 atomic_t buf_alloc; /**< Buffer allocation in progress */
77717 /*@} */
77718diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
77719index b1388b5..e1d1163 100644
77720--- a/include/drm/drm_crtc_helper.h
77721+++ b/include/drm/drm_crtc_helper.h
77722@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
77723 struct drm_connector *connector);
77724 /* disable encoder when not in use - more explicit than dpms off */
77725 void (*disable)(struct drm_encoder *encoder);
77726-};
77727+} __no_const;
77728
77729 /**
77730 * drm_connector_helper_funcs - helper operations for connectors
77731diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
77732index 940ece4..8cb727f 100644
77733--- a/include/drm/i915_pciids.h
77734+++ b/include/drm/i915_pciids.h
77735@@ -37,7 +37,7 @@
77736 */
77737 #define INTEL_VGA_DEVICE(id, info) { \
77738 0x8086, id, \
77739- ~0, ~0, \
77740+ PCI_ANY_ID, PCI_ANY_ID, \
77741 0x030000, 0xff0000, \
77742 (unsigned long) info }
77743
77744diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
77745index 72dcbe8..8db58d7 100644
77746--- a/include/drm/ttm/ttm_memory.h
77747+++ b/include/drm/ttm/ttm_memory.h
77748@@ -48,7 +48,7 @@
77749
77750 struct ttm_mem_shrink {
77751 int (*do_shrink) (struct ttm_mem_shrink *);
77752-};
77753+} __no_const;
77754
77755 /**
77756 * struct ttm_mem_global - Global memory accounting structure.
77757diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
77758index 49a8284..9643967 100644
77759--- a/include/drm/ttm/ttm_page_alloc.h
77760+++ b/include/drm/ttm/ttm_page_alloc.h
77761@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
77762 */
77763 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
77764
77765+struct device;
77766 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
77767 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
77768
77769diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
77770index 4b840e8..155d235 100644
77771--- a/include/keys/asymmetric-subtype.h
77772+++ b/include/keys/asymmetric-subtype.h
77773@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
77774 /* Verify the signature on a key of this subtype (optional) */
77775 int (*verify_signature)(const struct key *key,
77776 const struct public_key_signature *sig);
77777-};
77778+} __do_const;
77779
77780 /**
77781 * asymmetric_key_subtype - Get the subtype from an asymmetric key
77782diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
77783index c1da539..1dcec55 100644
77784--- a/include/linux/atmdev.h
77785+++ b/include/linux/atmdev.h
77786@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
77787 #endif
77788
77789 struct k_atm_aal_stats {
77790-#define __HANDLE_ITEM(i) atomic_t i
77791+#define __HANDLE_ITEM(i) atomic_unchecked_t i
77792 __AAL_STAT_ITEMS
77793 #undef __HANDLE_ITEM
77794 };
77795@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
77796 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
77797 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
77798 struct module *owner;
77799-};
77800+} __do_const ;
77801
77802 struct atmphy_ops {
77803 int (*start)(struct atm_dev *dev);
77804diff --git a/include/linux/audit.h b/include/linux/audit.h
77805index ec1464d..833274b 100644
77806--- a/include/linux/audit.h
77807+++ b/include/linux/audit.h
77808@@ -196,7 +196,7 @@ static inline void audit_ptrace(struct task_struct *t)
77809 extern unsigned int audit_serial(void);
77810 extern int auditsc_get_stamp(struct audit_context *ctx,
77811 struct timespec *t, unsigned int *serial);
77812-extern int audit_set_loginuid(kuid_t loginuid);
77813+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
77814
77815 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
77816 {
77817diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
77818index b4a745d..e3c0942 100644
77819--- a/include/linux/binfmts.h
77820+++ b/include/linux/binfmts.h
77821@@ -45,7 +45,7 @@ struct linux_binprm {
77822 unsigned interp_data;
77823 unsigned long loader, exec;
77824 char tcomm[TASK_COMM_LEN];
77825-};
77826+} __randomize_layout;
77827
77828 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
77829 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
77830@@ -74,8 +74,10 @@ struct linux_binfmt {
77831 int (*load_binary)(struct linux_binprm *);
77832 int (*load_shlib)(struct file *);
77833 int (*core_dump)(struct coredump_params *cprm);
77834+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
77835+ void (*handle_mmap)(struct file *);
77836 unsigned long min_coredump; /* minimal dump size */
77837-};
77838+} __do_const __randomize_layout;
77839
77840 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
77841
77842diff --git a/include/linux/bitops.h b/include/linux/bitops.h
77843index be5fd38..d71192a 100644
77844--- a/include/linux/bitops.h
77845+++ b/include/linux/bitops.h
77846@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
77847 * @word: value to rotate
77848 * @shift: bits to roll
77849 */
77850-static inline __u32 rol32(__u32 word, unsigned int shift)
77851+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
77852 {
77853 return (word << shift) | (word >> (32 - shift));
77854 }
77855@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
77856 * @word: value to rotate
77857 * @shift: bits to roll
77858 */
77859-static inline __u32 ror32(__u32 word, unsigned int shift)
77860+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
77861 {
77862 return (word >> shift) | (word << (32 - shift));
77863 }
77864@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
77865 return (__s32)(value << shift) >> shift;
77866 }
77867
77868-static inline unsigned fls_long(unsigned long l)
77869+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
77870 {
77871 if (sizeof(l) == 4)
77872 return fls(l);
77873diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
77874index 4afa4f8..1ed7824 100644
77875--- a/include/linux/blkdev.h
77876+++ b/include/linux/blkdev.h
77877@@ -1572,7 +1572,7 @@ struct block_device_operations {
77878 /* this callback is with swap_lock and sometimes page table lock held */
77879 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
77880 struct module *owner;
77881-};
77882+} __do_const;
77883
77884 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
77885 unsigned long);
77886diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
77887index afc1343..9735539 100644
77888--- a/include/linux/blktrace_api.h
77889+++ b/include/linux/blktrace_api.h
77890@@ -25,7 +25,7 @@ struct blk_trace {
77891 struct dentry *dropped_file;
77892 struct dentry *msg_file;
77893 struct list_head running_list;
77894- atomic_t dropped;
77895+ atomic_unchecked_t dropped;
77896 };
77897
77898 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
77899diff --git a/include/linux/cache.h b/include/linux/cache.h
77900index 17e7e82..1d7da26 100644
77901--- a/include/linux/cache.h
77902+++ b/include/linux/cache.h
77903@@ -16,6 +16,14 @@
77904 #define __read_mostly
77905 #endif
77906
77907+#ifndef __read_only
77908+#ifdef CONFIG_PAX_KERNEXEC
77909+#error KERNEXEC requires __read_only
77910+#else
77911+#define __read_only __read_mostly
77912+#endif
77913+#endif
77914+
77915 #ifndef ____cacheline_aligned
77916 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
77917 #endif
77918diff --git a/include/linux/capability.h b/include/linux/capability.h
77919index a6ee1f9..e1ca49d 100644
77920--- a/include/linux/capability.h
77921+++ b/include/linux/capability.h
77922@@ -212,8 +212,13 @@ extern bool capable(int cap);
77923 extern bool ns_capable(struct user_namespace *ns, int cap);
77924 extern bool inode_capable(const struct inode *inode, int cap);
77925 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
77926+extern bool capable_nolog(int cap);
77927+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
77928+extern bool inode_capable_nolog(const struct inode *inode, int cap);
77929
77930 /* audit system wants to get cap info from files as well */
77931 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
77932
77933+extern int is_privileged_binary(const struct dentry *dentry);
77934+
77935 #endif /* !_LINUX_CAPABILITY_H */
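
The _nolog variants declared above let a capability be probed without emitting grsecurity's usual denial message, for call sites where failing the check is an expected fallback rather than a policy violation; is_privileged_binary() appears intended to report whether a dentry is setuid/setgid or carries file capabilities. A kernel-context sketch of the intended use, with an invented function name; not part of the patch.

/* Illustrative sketch, not part of the patch: probing a capability
 * where denial is routine, so the unlogged variant is appropriate. */
#include <linux/capability.h>

static bool may_try_admin_path(void)
{
	/* ordinary users are expected to fail this; don't log it */
	return capable_nolog(CAP_SYS_ADMIN);
}
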
77936diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
77937index 8609d57..86e4d79 100644
77938--- a/include/linux/cdrom.h
77939+++ b/include/linux/cdrom.h
77940@@ -87,7 +87,6 @@ struct cdrom_device_ops {
77941
77942 /* driver specifications */
77943 const int capability; /* capability flags */
77944- int n_minors; /* number of active minor devices */
77945 /* handle uniform packets for scsi type devices (scsi,atapi) */
77946 int (*generic_packet) (struct cdrom_device_info *,
77947 struct packet_command *);
77948diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
77949index 4ce9056..86caac6 100644
77950--- a/include/linux/cleancache.h
77951+++ b/include/linux/cleancache.h
77952@@ -31,7 +31,7 @@ struct cleancache_ops {
77953 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
77954 void (*invalidate_inode)(int, struct cleancache_filekey);
77955 void (*invalidate_fs)(int);
77956-};
77957+} __no_const;
77958
77959 extern struct cleancache_ops *
77960 cleancache_register_ops(struct cleancache_ops *ops);
77961diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
77962index 939533d..cf0a57c 100644
77963--- a/include/linux/clk-provider.h
77964+++ b/include/linux/clk-provider.h
77965@@ -166,6 +166,7 @@ struct clk_ops {
77966 unsigned long parent_accuracy);
77967 void (*init)(struct clk_hw *hw);
77968 };
77969+typedef struct clk_ops __no_const clk_ops_no_const;
77970
77971 /**
77972 * struct clk_init_data - holds init data that's common to all clocks and is
77973diff --git a/include/linux/compat.h b/include/linux/compat.h
77974index 3f448c6..df3ce1d 100644
77975--- a/include/linux/compat.h
77976+++ b/include/linux/compat.h
77977@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
77978 compat_size_t __user *len_ptr);
77979
77980 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
77981-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
77982+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
77983 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
77984 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
77985 compat_ssize_t msgsz, int msgflg);
77986@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
77987 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
77988 compat_ulong_t addr, compat_ulong_t data);
77989 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77990- compat_long_t addr, compat_long_t data);
77991+ compat_ulong_t addr, compat_ulong_t data);
77992
77993 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
77994 /*
77995diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
77996index 2507fd2..55203f8 100644
77997--- a/include/linux/compiler-gcc4.h
77998+++ b/include/linux/compiler-gcc4.h
77999@@ -39,9 +39,34 @@
78000 # define __compiletime_warning(message) __attribute__((warning(message)))
78001 # define __compiletime_error(message) __attribute__((error(message)))
78002 #endif /* __CHECKER__ */
78003+
78004+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
78005+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
78006+#define __bos0(ptr) __bos((ptr), 0)
78007+#define __bos1(ptr) __bos((ptr), 1)
78008 #endif /* GCC_VERSION >= 40300 */
78009
78010 #if GCC_VERSION >= 40500
78011+
78012+#ifdef RANDSTRUCT_PLUGIN
78013+#define __randomize_layout __attribute__((randomize_layout))
78014+#define __no_randomize_layout __attribute__((no_randomize_layout))
78015+#endif
78016+
78017+#ifdef CONSTIFY_PLUGIN
78018+#define __no_const __attribute__((no_const))
78019+#define __do_const __attribute__((do_const))
78020+#endif
78021+
78022+#ifdef SIZE_OVERFLOW_PLUGIN
78023+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
78024+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
78025+#endif
78026+
78027+#ifdef LATENT_ENTROPY_PLUGIN
78028+#define __latent_entropy __attribute__((latent_entropy))
78029+#endif
78030+
78031 /*
78032 * Mark a position in code as unreachable. This can be used to
78033 * suppress control flow warnings after asm blocks that transfer
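
These macros are the source-level hooks for the gcc plugins this patch relies on (randstruct, constify, size_overflow, latent_entropy): each attribute only exists when the matching plugin is enabled, and the compiler.h hunk below supplies empty fallbacks so unannotated builds still compile. A sketch of how the annotations are applied, with invented types; not part of the patch.

/* Illustrative sketch, not part of the patch: applying the plugin
 * attributes defined above.  Without the plugins loaded, every one
 * of these expands to nothing (see the compiler.h fallbacks below). */
struct example_ops {
	int (*handler)(void);
} __do_const;				/* constify: force instances read-only */

struct example_ids {
	int uid;
	int gid;
} __randomize_layout;			/* randstruct: shuffle field order */

static inline unsigned int __intentional_overflow(-1) rotl8(unsigned int x)
{
	return (x << 8) | (x >> 24);	/* wraps by design; don't instrument */
}
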
78034diff --git a/include/linux/compiler.h b/include/linux/compiler.h
78035index 2472740..4857634 100644
78036--- a/include/linux/compiler.h
78037+++ b/include/linux/compiler.h
78038@@ -5,11 +5,14 @@
78039
78040 #ifdef __CHECKER__
78041 # define __user __attribute__((noderef, address_space(1)))
78042+# define __force_user __force __user
78043 # define __kernel __attribute__((address_space(0)))
78044+# define __force_kernel __force __kernel
78045 # define __safe __attribute__((safe))
78046 # define __force __attribute__((force))
78047 # define __nocast __attribute__((nocast))
78048 # define __iomem __attribute__((noderef, address_space(2)))
78049+# define __force_iomem __force __iomem
78050 # define __must_hold(x) __attribute__((context(x,1,1)))
78051 # define __acquires(x) __attribute__((context(x,0,1)))
78052 # define __releases(x) __attribute__((context(x,1,0)))
78053@@ -17,20 +20,37 @@
78054 # define __release(x) __context__(x,-1)
78055 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
78056 # define __percpu __attribute__((noderef, address_space(3)))
78057+# define __force_percpu __force __percpu
78058 #ifdef CONFIG_SPARSE_RCU_POINTER
78059 # define __rcu __attribute__((noderef, address_space(4)))
78060+# define __force_rcu __force __rcu
78061 #else
78062 # define __rcu
78063+# define __force_rcu
78064 #endif
78065 extern void __chk_user_ptr(const volatile void __user *);
78066 extern void __chk_io_ptr(const volatile void __iomem *);
78067 #else
78068-# define __user
78069-# define __kernel
78070+# ifdef CHECKER_PLUGIN
78071+//# define __user
78072+//# define __force_user
78073+//# define __kernel
78074+//# define __force_kernel
78075+# else
78076+# ifdef STRUCTLEAK_PLUGIN
78077+# define __user __attribute__((user))
78078+# else
78079+# define __user
78080+# endif
78081+# define __force_user
78082+# define __kernel
78083+# define __force_kernel
78084+# endif
78085 # define __safe
78086 # define __force
78087 # define __nocast
78088 # define __iomem
78089+# define __force_iomem
78090 # define __chk_user_ptr(x) (void)0
78091 # define __chk_io_ptr(x) (void)0
78092 # define __builtin_warning(x, y...) (1)
78093@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
78094 # define __release(x) (void)0
78095 # define __cond_lock(x,c) (c)
78096 # define __percpu
78097+# define __force_percpu
78098 # define __rcu
78099+# define __force_rcu
78100 #endif
78101
78102 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
78103@@ -279,6 +301,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
78104 # define __attribute_const__ /* unimplemented */
78105 #endif
78106
78107+#ifndef __randomize_layout
78108+# define __randomize_layout
78109+#endif
78110+
78111+#ifndef __no_randomize_layout
78112+# define __no_randomize_layout
78113+#endif
78114+
78115+#ifndef __no_const
78116+# define __no_const
78117+#endif
78118+
78119+#ifndef __do_const
78120+# define __do_const
78121+#endif
78122+
78123+#ifndef __size_overflow
78124+# define __size_overflow(...)
78125+#endif
78126+
78127+#ifndef __intentional_overflow
78128+# define __intentional_overflow(...)
78129+#endif
78130+
78131+#ifndef __latent_entropy
78132+# define __latent_entropy
78133+#endif
78134+
78135 /*
78136 * Tell gcc if a function is cold. The compiler will assume any path
78137 * directly leading to the call is unlikely.
78138@@ -288,6 +338,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
78139 #define __cold
78140 #endif
78141
78142+#ifndef __alloc_size
78143+#define __alloc_size(...)
78144+#endif
78145+
78146+#ifndef __bos
78147+#define __bos(ptr, arg)
78148+#endif
78149+
78150+#ifndef __bos0
78151+#define __bos0(ptr)
78152+#endif
78153+
78154+#ifndef __bos1
78155+#define __bos1(ptr)
78156+#endif
78157+
78158 /* Simple shorthand for a section definition */
78159 #ifndef __section
78160 # define __section(S) __attribute__ ((__section__(#S)))
78161@@ -362,7 +428,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
78162 * use is to mediate communication between process-level code and irq/NMI
78163 * handlers, all running on the same CPU.
78164 */
78165-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
78166+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
78167+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
78168
78169 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
78170 #ifdef CONFIG_KPROBES
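
The compiler.h hunk above splits ACCESS_ONCE into a const-qualified reader and an explicit ACCESS_ONCE_RW for writes, alongside __force_* shorthands (__force plus the matching sparse address space) for every annotation. A minimal userspace sketch of the read/write split, with the macro bodies copied from the hunk and everything else mocked (sparse's address-space checking itself has no plain-cc equivalent):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

int main(void)
{
        int flag = 0;

        ACCESS_ONCE_RW(flag) = 1;          /* writes must say so explicitly */
        printf("%d\n", ACCESS_ONCE(flag)); /* reads go through a const alias */
        /* ACCESS_ONCE(flag) = 2;  -- no longer compiles: lvalue is const */
        return 0;
}

The const qualifier turns an accidental write through ACCESS_ONCE into a compile-time error instead of a silent store.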
78171diff --git a/include/linux/completion.h b/include/linux/completion.h
78172index 5d5aaae..0ea9b84 100644
78173--- a/include/linux/completion.h
78174+++ b/include/linux/completion.h
78175@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
78176
78177 extern void wait_for_completion(struct completion *);
78178 extern void wait_for_completion_io(struct completion *);
78179-extern int wait_for_completion_interruptible(struct completion *x);
78180-extern int wait_for_completion_killable(struct completion *x);
78181+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
78182+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
78183 extern unsigned long wait_for_completion_timeout(struct completion *x,
78184- unsigned long timeout);
78185+ unsigned long timeout) __intentional_overflow(-1);
78186 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
78187- unsigned long timeout);
78188+ unsigned long timeout) __intentional_overflow(-1);
78189 extern long wait_for_completion_interruptible_timeout(
78190- struct completion *x, unsigned long timeout);
78191+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
78192 extern long wait_for_completion_killable_timeout(
78193- struct completion *x, unsigned long timeout);
78194+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
78195 extern bool try_wait_for_completion(struct completion *x);
78196 extern bool completion_done(struct completion *x);
78197
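
The __intentional_overflow(-1) annotations added to the wait_for_completion_* family are consumed by the grsecurity size_overflow GCC plugin: they mark return values whose arithmetic may wrap by design, so the plugin does not instrument or report them (in plugin-less builds the macro expands to nothing, per the compiler.h hunk above). A userspace sketch of the kind of wrapping being whitelisted; the variable names and values are illustrative, not kernel values:

#include <stdio.h>

int main(void)
{
        unsigned long end = 5;                  /* deadline just past wrap */
        unsigned long now = (unsigned long)-3;  /* counter about to wrap */

        /* end - now wraps modulo 2^64; the result (8) is still the
         * correct remaining distance on the wrapping counter. */
        printf("remaining = %lu\n", end - now);
        return 0;
}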
78198diff --git a/include/linux/configfs.h b/include/linux/configfs.h
78199index 34025df..d94bbbc 100644
78200--- a/include/linux/configfs.h
78201+++ b/include/linux/configfs.h
78202@@ -125,7 +125,7 @@ struct configfs_attribute {
78203 const char *ca_name;
78204 struct module *ca_owner;
78205 umode_t ca_mode;
78206-};
78207+} __do_const;
78208
78209 /*
78210 * Users often need to create attribute structures for their configurable
78211diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
78212index 4d89e0e..5281847 100644
78213--- a/include/linux/cpufreq.h
78214+++ b/include/linux/cpufreq.h
78215@@ -191,6 +191,7 @@ struct global_attr {
78216 ssize_t (*store)(struct kobject *a, struct attribute *b,
78217 const char *c, size_t count);
78218 };
78219+typedef struct global_attr __no_const global_attr_no_const;
78220
78221 #define define_one_global_ro(_name) \
78222 static struct global_attr _name = \
78223@@ -232,7 +233,7 @@ struct cpufreq_driver {
78224 bool boost_supported;
78225 bool boost_enabled;
78226 int (*set_boost) (int state);
78227-};
78228+} __do_const;
78229
78230 /* flags */
78231 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
78232diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
78233index 50fcbb0..9d2dbd9 100644
78234--- a/include/linux/cpuidle.h
78235+++ b/include/linux/cpuidle.h
78236@@ -50,7 +50,8 @@ struct cpuidle_state {
78237 int index);
78238
78239 int (*enter_dead) (struct cpuidle_device *dev, int index);
78240-};
78241+} __do_const;
78242+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
78243
78244 /* Idle State Flags */
78245 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
78246@@ -192,7 +193,7 @@ struct cpuidle_governor {
78247 void (*reflect) (struct cpuidle_device *dev, int index);
78248
78249 struct module *owner;
78250-};
78251+} __do_const;
78252
78253 #ifdef CONFIG_CPU_IDLE
78254 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
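
__do_const is consumed by the grsecurity constify plugin, which moves qualifying ops-style structures (ones consisting of function pointers, like cpuidle_governor above) into read-only memory; the *_no_const typedefs are the escape hatch for the few instances that must be filled in at runtime. A plain-C approximation of the pattern using an explicit const qualifier, which the plugin applies implicitly per struct type; all names below are illustrative:

#include <stdio.h>

struct demo_ops {
        int (*enter)(int idx);
};
typedef struct demo_ops demo_ops_no_const;      /* mutable escape hatch */

static int enter_impl(int idx) { return idx + 1; }

static const struct demo_ops fixed_ops = { .enter = enter_impl };

int main(void)
{
        demo_ops_no_const runtime_ops;          /* built late, stays writable */
        runtime_ops.enter = enter_impl;

        printf("%d %d\n", fixed_ops.enter(1), runtime_ops.enter(2));
        /* fixed_ops.enter = NULL;  -- rejected: the table is const */
        return 0;
}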
78255diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
78256index d08e4d2..95fad61 100644
78257--- a/include/linux/cpumask.h
78258+++ b/include/linux/cpumask.h
78259@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
78260 }
78261
78262 /* Valid inputs for n are -1 and 0. */
78263-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
78264+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
78265 {
78266 return n+1;
78267 }
78268
78269-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
78270+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
78271 {
78272 return n+1;
78273 }
78274
78275-static inline unsigned int cpumask_next_and(int n,
78276+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
78277 const struct cpumask *srcp,
78278 const struct cpumask *andp)
78279 {
78280@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
78281 *
78282 * Returns >= nr_cpu_ids if no further cpus set.
78283 */
78284-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
78285+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
78286 {
78287 /* -1 is a legal arg here. */
78288 if (n != -1)
78289@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
78290 *
78291 * Returns >= nr_cpu_ids if no further cpus unset.
78292 */
78293-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
78294+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
78295 {
78296 /* -1 is a legal arg here. */
78297 if (n != -1)
78298@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
78299 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
78300 }
78301
78302-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
78303+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
78304 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
78305
78306 /**
78307diff --git a/include/linux/cred.h b/include/linux/cred.h
78308index 04421e8..a85afd4 100644
78309--- a/include/linux/cred.h
78310+++ b/include/linux/cred.h
78311@@ -35,7 +35,7 @@ struct group_info {
78312 int nblocks;
78313 kgid_t small_block[NGROUPS_SMALL];
78314 kgid_t *blocks[0];
78315-};
78316+} __randomize_layout;
78317
78318 /**
78319 * get_group_info - Get a reference to a group info structure
78320@@ -136,7 +136,7 @@ struct cred {
78321 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
78322 struct group_info *group_info; /* supplementary groups for euid/fsgid */
78323 struct rcu_head rcu; /* RCU deletion hook */
78324-};
78325+} __randomize_layout;
78326
78327 extern void __put_cred(struct cred *);
78328 extern void exit_creds(struct task_struct *);
78329@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
78330 static inline void validate_process_creds(void)
78331 {
78332 }
78333+static inline void validate_task_creds(struct task_struct *task)
78334+{
78335+}
78336 #endif
78337
78338 /**
78339@@ -322,6 +325,7 @@ static inline void put_cred(const struct cred *_cred)
78340
78341 #define task_uid(task) (task_cred_xxx((task), uid))
78342 #define task_euid(task) (task_cred_xxx((task), euid))
78343+#define task_securebits(task) (task_cred_xxx((task), securebits))
78344
78345 #define current_cred_xxx(xxx) \
78346 ({ \
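
__randomize_layout marks structures whose field order the randstruct plugin may shuffle at build time (group_info and cred above; dentry, inode, file and friends in the hunks that follow), so an exploit can no longer hardcode field offsets. The rule it enforces on the rest of the tree is: access fields by name, never by assumed offset. A sketch of the assumption that breaks, using a hypothetical struct:

#include <stdio.h>
#include <stddef.h>

struct creds_like {             /* one possible layout out of many */
        unsigned int uid;
        unsigned int euid;
        void *security;
};

int main(void)
{
        /* Correct under any layout: the compiler resolves the name. */
        printf("euid at offset %zu\n", offsetof(struct creds_like, euid));

        /* Broken under randomization: code (or an exploit payload) that
         * "knows" euid always lives at byte 4 only works for the one
         * layout it was written against. */
        return 0;
}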
78347diff --git a/include/linux/crypto.h b/include/linux/crypto.h
78348index b92eadf..b4ecdc1 100644
78349--- a/include/linux/crypto.h
78350+++ b/include/linux/crypto.h
78351@@ -373,7 +373,7 @@ struct cipher_tfm {
78352 const u8 *key, unsigned int keylen);
78353 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
78354 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
78355-};
78356+} __no_const;
78357
78358 struct hash_tfm {
78359 int (*init)(struct hash_desc *desc);
78360@@ -394,13 +394,13 @@ struct compress_tfm {
78361 int (*cot_decompress)(struct crypto_tfm *tfm,
78362 const u8 *src, unsigned int slen,
78363 u8 *dst, unsigned int *dlen);
78364-};
78365+} __no_const;
78366
78367 struct rng_tfm {
78368 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
78369 unsigned int dlen);
78370 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
78371-};
78372+} __no_const;
78373
78374 #define crt_ablkcipher crt_u.ablkcipher
78375 #define crt_aead crt_u.aead
78376diff --git a/include/linux/ctype.h b/include/linux/ctype.h
78377index 653589e..4ef254a 100644
78378--- a/include/linux/ctype.h
78379+++ b/include/linux/ctype.h
78380@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
78381 * Fast implementation of tolower() for internal usage. Do not use in your
78382 * code.
78383 */
78384-static inline char _tolower(const char c)
78385+static inline unsigned char _tolower(const unsigned char c)
78386 {
78387 return c | 0x20;
78388 }
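
The _tolower change swaps char for unsigned char. On targets where plain char is signed, a byte like 0xC3 arrives as a negative value, so the change keeps `c | 0x20` (and any later table indexing on the result) inside a well-defined 0..255 range. A quick demonstration of the difference:

#include <stdio.h>

int main(void)
{
        signed char   s = (signed char)0xC3;    /* high-bit byte */
        unsigned char u = 0xC3;

        printf("signed:   %d\n", s | 0x20);     /* -29: sign-extended */
        printf("unsigned: %d\n", u | 0x20);     /* 227: stays in range */
        return 0;
}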
78389diff --git a/include/linux/dcache.h b/include/linux/dcache.h
78390index bf72e9a..4ca7927 100644
78391--- a/include/linux/dcache.h
78392+++ b/include/linux/dcache.h
78393@@ -133,7 +133,7 @@ struct dentry {
78394 } d_u;
78395 struct list_head d_subdirs; /* our children */
78396 struct hlist_node d_alias; /* inode alias list */
78397-};
78398+} __randomize_layout;
78399
78400 /*
78401 * dentry->d_lock spinlock nesting subclasses:
78402diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
78403index 7925bf0..d5143d2 100644
78404--- a/include/linux/decompress/mm.h
78405+++ b/include/linux/decompress/mm.h
78406@@ -77,7 +77,7 @@ static void free(void *where)
78407 * warnings when not needed (indeed large_malloc / large_free are not
78408 * needed by inflate */
78409
78410-#define malloc(a) kmalloc(a, GFP_KERNEL)
78411+#define malloc(a) kmalloc((a), GFP_KERNEL)
78412 #define free(a) kfree(a)
78413
78414 #define large_malloc(a) vmalloc(a)
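
The decompress/mm.h tweak only parenthesizes the macro argument: kmalloc((a), GFP_KERNEL). In function-argument position this is purely defensive, but it is the standard hygiene rule the patch applies everywhere, because unparenthesized macro parameters do bite the moment they land next to a higher-precedence operator. The classic illustration, with made-up macro names:

#include <stdio.h>

#define BYTES_BAD(n)  n * 4             /* unhygienic */
#define BYTES_GOOD(n) ((n) * 4)         /* the style the patch enforces */

int main(void)
{
        printf("%d\n", BYTES_BAD(2 + 1));   /* 2 + 1*4 = 6, not 12 */
        printf("%d\n", BYTES_GOOD(2 + 1));  /* (2 + 1)*4 = 12 */
        return 0;
}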
78415diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
78416index d48dc00..211ee54 100644
78417--- a/include/linux/devfreq.h
78418+++ b/include/linux/devfreq.h
78419@@ -114,7 +114,7 @@ struct devfreq_governor {
78420 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
78421 int (*event_handler)(struct devfreq *devfreq,
78422 unsigned int event, void *data);
78423-};
78424+} __do_const;
78425
78426 /**
78427 * struct devfreq - Device devfreq structure
78428diff --git a/include/linux/device.h b/include/linux/device.h
78429index 952b010..d5b7691 100644
78430--- a/include/linux/device.h
78431+++ b/include/linux/device.h
78432@@ -310,7 +310,7 @@ struct subsys_interface {
78433 struct list_head node;
78434 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
78435 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
78436-};
78437+} __do_const;
78438
78439 int subsys_interface_register(struct subsys_interface *sif);
78440 void subsys_interface_unregister(struct subsys_interface *sif);
78441@@ -506,7 +506,7 @@ struct device_type {
78442 void (*release)(struct device *dev);
78443
78444 const struct dev_pm_ops *pm;
78445-};
78446+} __do_const;
78447
78448 /* interface for exporting device attributes */
78449 struct device_attribute {
78450@@ -516,11 +516,12 @@ struct device_attribute {
78451 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
78452 const char *buf, size_t count);
78453 };
78454+typedef struct device_attribute __no_const device_attribute_no_const;
78455
78456 struct dev_ext_attribute {
78457 struct device_attribute attr;
78458 void *var;
78459-};
78460+} __do_const;
78461
78462 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
78463 char *buf);
78464diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
78465index fd4aee2..1f28db9 100644
78466--- a/include/linux/dma-mapping.h
78467+++ b/include/linux/dma-mapping.h
78468@@ -54,7 +54,7 @@ struct dma_map_ops {
78469 u64 (*get_required_mask)(struct device *dev);
78470 #endif
78471 int is_phys;
78472-};
78473+} __do_const;
78474
78475 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
78476
78477diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
78478index 0a5f552..6661a5a 100644
78479--- a/include/linux/dmaengine.h
78480+++ b/include/linux/dmaengine.h
78481@@ -1151,9 +1151,9 @@ struct dma_pinned_list {
78482 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
78483 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
78484
78485-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
78486+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
78487 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
78488-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
78489+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
78490 struct dma_pinned_list *pinned_list, struct page *page,
78491 unsigned int offset, size_t len);
78492
78493diff --git a/include/linux/efi.h b/include/linux/efi.h
78494index 0a819e7..8ed47f1 100644
78495--- a/include/linux/efi.h
78496+++ b/include/linux/efi.h
78497@@ -768,6 +768,7 @@ struct efivar_operations {
78498 efi_set_variable_t *set_variable;
78499 efi_query_variable_store_t *query_variable_store;
78500 };
78501+typedef struct efivar_operations __no_const efivar_operations_no_const;
78502
78503 struct efivars {
78504 /*
78505diff --git a/include/linux/elf.h b/include/linux/elf.h
78506index 67a5fa7..b817372 100644
78507--- a/include/linux/elf.h
78508+++ b/include/linux/elf.h
78509@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
78510 #define elf_note elf32_note
78511 #define elf_addr_t Elf32_Off
78512 #define Elf_Half Elf32_Half
78513+#define elf_dyn Elf32_Dyn
78514
78515 #else
78516
78517@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
78518 #define elf_note elf64_note
78519 #define elf_addr_t Elf64_Off
78520 #define Elf_Half Elf64_Half
78521+#define elf_dyn Elf64_Dyn
78522
78523 #endif
78524
78525diff --git a/include/linux/err.h b/include/linux/err.h
78526index 15f92e0..e825a8e 100644
78527--- a/include/linux/err.h
78528+++ b/include/linux/err.h
78529@@ -19,12 +19,12 @@
78530
78531 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
78532
78533-static inline void * __must_check ERR_PTR(long error)
78534+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
78535 {
78536 return (void *) error;
78537 }
78538
78539-static inline long __must_check PTR_ERR(__force const void *ptr)
78540+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
78541 {
78542 return (long) ptr;
78543 }
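
ERR_PTR/PTR_ERR encode a small negative errno in the top page of the address space, letting one pointer return carry either a valid object or an error; the __intentional_overflow(-1) added above whitelists this deliberate long/pointer round-trip for the size_overflow plugin. A self-contained sketch of the encoding, with MAX_ERRNO copied from the usual kernel value:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }

int main(void)
{
        void *p = err_ptr(-12);         /* -ENOMEM */

        if (IS_ERR_VALUE((unsigned long)p))
                printf("error %ld\n", ptr_err(p));      /* error -12 */
        return 0;
}

Errnos -1..-4095 land in the last 4095 bytes of the address space, where no real allocation can sit, so the two cases never collide.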
78544diff --git a/include/linux/extcon.h b/include/linux/extcon.h
78545index 21c59af..6057a03 100644
78546--- a/include/linux/extcon.h
78547+++ b/include/linux/extcon.h
78548@@ -135,7 +135,7 @@ struct extcon_dev {
78549 /* /sys/class/extcon/.../mutually_exclusive/... */
78550 struct attribute_group attr_g_muex;
78551 struct attribute **attrs_muex;
78552- struct device_attribute *d_attrs_muex;
78553+ device_attribute_no_const *d_attrs_muex;
78554 };
78555
78556 /**
78557diff --git a/include/linux/fb.h b/include/linux/fb.h
78558index fe6ac95..898d41d 100644
78559--- a/include/linux/fb.h
78560+++ b/include/linux/fb.h
78561@@ -304,7 +304,7 @@ struct fb_ops {
78562 /* called at KDB enter and leave time to prepare the console */
78563 int (*fb_debug_enter)(struct fb_info *info);
78564 int (*fb_debug_leave)(struct fb_info *info);
78565-};
78566+} __do_const;
78567
78568 #ifdef CONFIG_FB_TILEBLITTING
78569 #define FB_TILE_CURSOR_NONE 0
78570diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
78571index 70e8e21..1939916 100644
78572--- a/include/linux/fdtable.h
78573+++ b/include/linux/fdtable.h
78574@@ -102,7 +102,7 @@ struct files_struct *get_files_struct(struct task_struct *);
78575 void put_files_struct(struct files_struct *fs);
78576 void reset_files_struct(struct files_struct *);
78577 int unshare_files(struct files_struct **);
78578-struct files_struct *dup_fd(struct files_struct *, int *);
78579+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
78580 void do_close_on_exec(struct files_struct *);
78581 int iterate_fd(struct files_struct *, unsigned,
78582 int (*)(const void *, struct file *, unsigned),
78583diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
78584index 8293262..2b3b8bd 100644
78585--- a/include/linux/frontswap.h
78586+++ b/include/linux/frontswap.h
78587@@ -11,7 +11,7 @@ struct frontswap_ops {
78588 int (*load)(unsigned, pgoff_t, struct page *);
78589 void (*invalidate_page)(unsigned, pgoff_t);
78590 void (*invalidate_area)(unsigned);
78591-};
78592+} __no_const;
78593
78594 extern bool frontswap_enabled;
78595 extern struct frontswap_ops *
78596diff --git a/include/linux/fs.h b/include/linux/fs.h
78597index 23b2a35..8764ab7 100644
78598--- a/include/linux/fs.h
78599+++ b/include/linux/fs.h
78600@@ -426,7 +426,7 @@ struct address_space {
78601 spinlock_t private_lock; /* for use by the address_space */
78602 struct list_head private_list; /* ditto */
78603 void *private_data; /* ditto */
78604-} __attribute__((aligned(sizeof(long))));
78605+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
78606 /*
78607 * On most architectures that alignment is already the case; but
78608 * must be enforced here for CRIS, to let the least significant bit
78609@@ -469,7 +469,7 @@ struct block_device {
78610 int bd_fsfreeze_count;
78611 /* Mutex for freeze */
78612 struct mutex bd_fsfreeze_mutex;
78613-};
78614+} __randomize_layout;
78615
78616 /*
78617 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
78618@@ -613,7 +613,7 @@ struct inode {
78619 atomic_t i_readcount; /* struct files open RO */
78620 #endif
78621 void *i_private; /* fs or device private pointer */
78622-};
78623+} __randomize_layout;
78624
78625 static inline int inode_unhashed(struct inode *inode)
78626 {
78627@@ -812,7 +812,7 @@ struct file {
78628 #ifdef CONFIG_DEBUG_WRITECOUNT
78629 unsigned long f_mnt_write_state;
78630 #endif
78631-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
78632+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
78633
78634 struct file_handle {
78635 __u32 handle_bytes;
78636@@ -982,7 +982,7 @@ struct file_lock {
78637 int state; /* state of grant or error if -ve */
78638 } afs;
78639 } fl_u;
78640-};
78641+} __randomize_layout;
78642
78643 /* The following constant reflects the upper bound of the file/locking space */
78644 #ifndef OFFSET_MAX
78645@@ -1329,7 +1329,7 @@ struct super_block {
78646 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
78647 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
78648 struct rcu_head rcu;
78649-};
78650+} __randomize_layout;
78651
78652 extern struct timespec current_fs_time(struct super_block *sb);
78653
78654@@ -1551,7 +1551,8 @@ struct file_operations {
78655 long (*fallocate)(struct file *file, int mode, loff_t offset,
78656 loff_t len);
78657 int (*show_fdinfo)(struct seq_file *m, struct file *f);
78658-};
78659+} __do_const __randomize_layout;
78660+typedef struct file_operations __no_const file_operations_no_const;
78661
78662 struct inode_operations {
78663 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
78664@@ -2820,4 +2821,14 @@ static inline bool dir_relax(struct inode *inode)
78665 return !IS_DEADDIR(inode);
78666 }
78667
78668+static inline bool is_sidechannel_device(const struct inode *inode)
78669+{
78670+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
78671+ umode_t mode = inode->i_mode;
78672+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
78673+#else
78674+ return false;
78675+#endif
78676+}
78677+
78678 #endif /* _LINUX_FS_H */
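
is_sidechannel_device() is new grsecurity infrastructure: under CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL it flags world-readable or world-writable character/block devices, and the fsnotify hunks further down use it to suppress access/modify notifications on such devices, so a watcher on a shared device node learns nothing about other users' I/O timing. The mode predicate itself is plain POSIX and can be exercised directly against any path:

#include <stdio.h>
#include <sys/stat.h>

/* Same test as is_sidechannel_device(); <sys/stat.h> provides the
 * identical S_* macros the kernel uses. */
static int is_sidechannel_mode(mode_t mode)
{
        return (S_ISCHR(mode) || S_ISBLK(mode)) &&
               (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
        struct stat st;

        if (stat("/dev/tty", &st) == 0)
                printf("/dev/tty sidechannel candidate: %d\n",
                       is_sidechannel_mode(st.st_mode));
        return 0;
}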
78679diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
78680index 0efc3e6..fd23610 100644
78681--- a/include/linux/fs_struct.h
78682+++ b/include/linux/fs_struct.h
78683@@ -6,13 +6,13 @@
78684 #include <linux/seqlock.h>
78685
78686 struct fs_struct {
78687- int users;
78688+ atomic_t users;
78689 spinlock_t lock;
78690 seqcount_t seq;
78691 int umask;
78692 int in_exec;
78693 struct path root, pwd;
78694-};
78695+} __randomize_layout;
78696
78697 extern struct kmem_cache *fs_cachep;
78698
78699diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
78700index 7714849..a4a5c7a 100644
78701--- a/include/linux/fscache-cache.h
78702+++ b/include/linux/fscache-cache.h
78703@@ -113,7 +113,7 @@ struct fscache_operation {
78704 fscache_operation_release_t release;
78705 };
78706
78707-extern atomic_t fscache_op_debug_id;
78708+extern atomic_unchecked_t fscache_op_debug_id;
78709 extern void fscache_op_work_func(struct work_struct *work);
78710
78711 extern void fscache_enqueue_operation(struct fscache_operation *);
78712@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
78713 INIT_WORK(&op->work, fscache_op_work_func);
78714 atomic_set(&op->usage, 1);
78715 op->state = FSCACHE_OP_ST_INITIALISED;
78716- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
78717+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
78718 op->processor = processor;
78719 op->release = release;
78720 INIT_LIST_HEAD(&op->pend_link);
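
The fscache hunk is one of many in this patch converting counters to atomic_unchecked_t. Under PaX REFCOUNT, plain atomic_t operations trap on overflow to stop reference-count exploits; debug IDs like fscache_op_debug_id are mere sequence numbers for which wrapping is harmless, so the _unchecked variant opts them out of the trap. A userspace sketch of the distinction (the kernel enforces the checked case inside the atomic op itself; here it is emulated with an explicit test):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_inc(int *v)          /* models atomic_inc on atomic_t */
{
        if (*v == INT_MAX) {
                fprintf(stderr, "refcount overflow, aborting\n");
                abort();
        }
        return ++*v;
}

static unsigned unchecked_inc(unsigned *v)  /* models atomic_unchecked_t */
{
        return ++*v;                        /* wrapping is fine for an id */
}

int main(void)
{
        unsigned debug_id = UINT_MAX;
        printf("next id: %u\n", unchecked_inc(&debug_id)); /* wraps to 0 */

        int refs = 1;
        printf("refs: %d\n", checked_inc(&refs));
        return 0;
}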
78721diff --git a/include/linux/fscache.h b/include/linux/fscache.h
78722index 115bb81..e7b812b 100644
78723--- a/include/linux/fscache.h
78724+++ b/include/linux/fscache.h
78725@@ -152,7 +152,7 @@ struct fscache_cookie_def {
78726 * - this is mandatory for any object that may have data
78727 */
78728 void (*now_uncached)(void *cookie_netfs_data);
78729-};
78730+} __do_const;
78731
78732 /*
78733 * fscache cached network filesystem type
78734diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
78735index 1c804b0..1432c2b 100644
78736--- a/include/linux/fsnotify.h
78737+++ b/include/linux/fsnotify.h
78738@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
78739 struct inode *inode = file_inode(file);
78740 __u32 mask = FS_ACCESS;
78741
78742+ if (is_sidechannel_device(inode))
78743+ return;
78744+
78745 if (S_ISDIR(inode->i_mode))
78746 mask |= FS_ISDIR;
78747
78748@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
78749 struct inode *inode = file_inode(file);
78750 __u32 mask = FS_MODIFY;
78751
78752+ if (is_sidechannel_device(inode))
78753+ return;
78754+
78755 if (S_ISDIR(inode->i_mode))
78756 mask |= FS_ISDIR;
78757
78758@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
78759 */
78760 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
78761 {
78762- return kstrdup(name, GFP_KERNEL);
78763+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
78764 }
78765
78766 /*
78767diff --git a/include/linux/genhd.h b/include/linux/genhd.h
78768index 9f3c275..8bdff5d 100644
78769--- a/include/linux/genhd.h
78770+++ b/include/linux/genhd.h
78771@@ -194,7 +194,7 @@ struct gendisk {
78772 struct kobject *slave_dir;
78773
78774 struct timer_rand_state *random;
78775- atomic_t sync_io; /* RAID */
78776+ atomic_unchecked_t sync_io; /* RAID */
78777 struct disk_events *ev;
78778 #ifdef CONFIG_BLK_DEV_INTEGRITY
78779 struct blk_integrity *integrity;
78780@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
78781 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
78782
78783 /* drivers/char/random.c */
78784-extern void add_disk_randomness(struct gendisk *disk);
78785+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
78786 extern void rand_initialize_disk(struct gendisk *disk);
78787
78788 static inline sector_t get_start_sect(struct block_device *bdev)
78789diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
78790index c0894dd..2fbf10c 100644
78791--- a/include/linux/genl_magic_func.h
78792+++ b/include/linux/genl_magic_func.h
78793@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
78794 },
78795
78796 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
78797-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
78798+static struct genl_ops ZZZ_genl_ops[] = {
78799 #include GENL_MAGIC_INCLUDE_FILE
78800 };
78801
78802diff --git a/include/linux/gfp.h b/include/linux/gfp.h
78803index 39b81dc..819dc51 100644
78804--- a/include/linux/gfp.h
78805+++ b/include/linux/gfp.h
78806@@ -36,6 +36,13 @@ struct vm_area_struct;
78807 #define ___GFP_NO_KSWAPD 0x400000u
78808 #define ___GFP_OTHER_NODE 0x800000u
78809 #define ___GFP_WRITE 0x1000000u
78810+
78811+#ifdef CONFIG_PAX_USERCOPY_SLABS
78812+#define ___GFP_USERCOPY 0x2000000u
78813+#else
78814+#define ___GFP_USERCOPY 0
78815+#endif
78816+
78817 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
78818
78819 /*
78820@@ -93,6 +100,7 @@ struct vm_area_struct;
78821 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
78822 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
78823 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
78824+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
78825
78826 /*
78827 * This may seem redundant, but it's a way of annotating false positives vs.
78828@@ -100,7 +108,7 @@ struct vm_area_struct;
78829 */
78830 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
78831
78832-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
78833+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
78834 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
78835
78836 /* This equals 0, but use constants in case they ever change */
78837@@ -158,6 +166,8 @@ struct vm_area_struct;
78838 /* 4GB DMA on some platforms */
78839 #define GFP_DMA32 __GFP_DMA32
78840
78841+#define GFP_USERCOPY __GFP_USERCOPY
78842+
78843 /* Convert GFP flags to their corresponding migrate type */
78844 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
78845 {
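
The gfp.h hunk claims a new flag bit: ___GFP_USERCOPY takes 0x2000000 (bit 25), and __GFP_BITS_SHIFT grows from 25 to 26 so __GFP_BITS_MASK still covers every defined bit. Slab caches created with this flag are the ones PAX_USERCOPY permits copy_to_user()/copy_from_user() to touch. The bookkeeping is easy to check, with the values copied from the hunk:

#include <stdio.h>

#define ___GFP_USERCOPY   0x2000000u
#define __GFP_BITS_SHIFT  26
#define __GFP_BITS_MASK   ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
        printf("USERCOPY is bit %d\n",
               __builtin_ctz(___GFP_USERCOPY));                 /* 25 */
        printf("inside mask: %d\n",
               (___GFP_USERCOPY & __GFP_BITS_MASK) == ___GFP_USERCOPY);
        return 0;
}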
78846diff --git a/include/linux/gracl.h b/include/linux/gracl.h
78847new file mode 100644
78848index 0000000..edb2cb6
78849--- /dev/null
78850+++ b/include/linux/gracl.h
78851@@ -0,0 +1,340 @@
78852+#ifndef GR_ACL_H
78853+#define GR_ACL_H
78854+
78855+#include <linux/grdefs.h>
78856+#include <linux/resource.h>
78857+#include <linux/capability.h>
78858+#include <linux/dcache.h>
78859+#include <asm/resource.h>
78860+
78861+/* Major status information */
78862+
78863+#define GR_VERSION "grsecurity 3.0"
78864+#define GRSECURITY_VERSION 0x3000
78865+
78866+enum {
78867+ GR_SHUTDOWN = 0,
78868+ GR_ENABLE = 1,
78869+ GR_SPROLE = 2,
78870+ GR_OLDRELOAD = 3,
78871+ GR_SEGVMOD = 4,
78872+ GR_STATUS = 5,
78873+ GR_UNSPROLE = 6,
78874+ GR_PASSSET = 7,
78875+ GR_SPROLEPAM = 8,
78876+ GR_RELOAD = 9,
78877+};
78878+
78879+/* Password setup definitions
78880+ * kernel/grhash.c */
78881+enum {
78882+ GR_PW_LEN = 128,
78883+ GR_SALT_LEN = 16,
78884+ GR_SHA_LEN = 32,
78885+};
78886+
78887+enum {
78888+ GR_SPROLE_LEN = 64,
78889+};
78890+
78891+enum {
78892+ GR_NO_GLOB = 0,
78893+ GR_REG_GLOB,
78894+ GR_CREATE_GLOB
78895+};
78896+
78897+#define GR_NLIMITS 32
78898+
78899+/* Begin Data Structures */
78900+
78901+struct sprole_pw {
78902+ unsigned char *rolename;
78903+ unsigned char salt[GR_SALT_LEN];
78904+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
78905+};
78906+
78907+struct name_entry {
78908+ __u32 key;
78909+ ino_t inode;
78910+ dev_t device;
78911+ char *name;
78912+ __u16 len;
78913+ __u8 deleted;
78914+ struct name_entry *prev;
78915+ struct name_entry *next;
78916+};
78917+
78918+struct inodev_entry {
78919+ struct name_entry *nentry;
78920+ struct inodev_entry *prev;
78921+ struct inodev_entry *next;
78922+};
78923+
78924+struct acl_role_db {
78925+ struct acl_role_label **r_hash;
78926+ __u32 r_size;
78927+};
78928+
78929+struct inodev_db {
78930+ struct inodev_entry **i_hash;
78931+ __u32 i_size;
78932+};
78933+
78934+struct name_db {
78935+ struct name_entry **n_hash;
78936+ __u32 n_size;
78937+};
78938+
78939+struct crash_uid {
78940+ uid_t uid;
78941+ unsigned long expires;
78942+};
78943+
78944+struct gr_hash_struct {
78945+ void **table;
78946+ void **nametable;
78947+ void *first;
78948+ __u32 table_size;
78949+ __u32 used_size;
78950+ int type;
78951+};
78952+
78953+/* Userspace Grsecurity ACL data structures */
78954+
78955+struct acl_subject_label {
78956+ char *filename;
78957+ ino_t inode;
78958+ dev_t device;
78959+ __u32 mode;
78960+ kernel_cap_t cap_mask;
78961+ kernel_cap_t cap_lower;
78962+ kernel_cap_t cap_invert_audit;
78963+
78964+ struct rlimit res[GR_NLIMITS];
78965+ __u32 resmask;
78966+
78967+ __u8 user_trans_type;
78968+ __u8 group_trans_type;
78969+ uid_t *user_transitions;
78970+ gid_t *group_transitions;
78971+ __u16 user_trans_num;
78972+ __u16 group_trans_num;
78973+
78974+ __u32 sock_families[2];
78975+ __u32 ip_proto[8];
78976+ __u32 ip_type;
78977+ struct acl_ip_label **ips;
78978+ __u32 ip_num;
78979+ __u32 inaddr_any_override;
78980+
78981+ __u32 crashes;
78982+ unsigned long expires;
78983+
78984+ struct acl_subject_label *parent_subject;
78985+ struct gr_hash_struct *hash;
78986+ struct acl_subject_label *prev;
78987+ struct acl_subject_label *next;
78988+
78989+ struct acl_object_label **obj_hash;
78990+ __u32 obj_hash_size;
78991+ __u16 pax_flags;
78992+};
78993+
78994+struct role_allowed_ip {
78995+ __u32 addr;
78996+ __u32 netmask;
78997+
78998+ struct role_allowed_ip *prev;
78999+ struct role_allowed_ip *next;
79000+};
79001+
79002+struct role_transition {
79003+ char *rolename;
79004+
79005+ struct role_transition *prev;
79006+ struct role_transition *next;
79007+};
79008+
79009+struct acl_role_label {
79010+ char *rolename;
79011+ uid_t uidgid;
79012+ __u16 roletype;
79013+
79014+ __u16 auth_attempts;
79015+ unsigned long expires;
79016+
79017+ struct acl_subject_label *root_label;
79018+ struct gr_hash_struct *hash;
79019+
79020+ struct acl_role_label *prev;
79021+ struct acl_role_label *next;
79022+
79023+ struct role_transition *transitions;
79024+ struct role_allowed_ip *allowed_ips;
79025+ uid_t *domain_children;
79026+ __u16 domain_child_num;
79027+
79028+ umode_t umask;
79029+
79030+ struct acl_subject_label **subj_hash;
79031+ __u32 subj_hash_size;
79032+};
79033+
79034+struct user_acl_role_db {
79035+ struct acl_role_label **r_table;
79036+ __u32 num_pointers; /* Number of allocations to track */
79037+ __u32 num_roles; /* Number of roles */
79038+ __u32 num_domain_children; /* Number of domain children */
79039+ __u32 num_subjects; /* Number of subjects */
79040+ __u32 num_objects; /* Number of objects */
79041+};
79042+
79043+struct acl_object_label {
79044+ char *filename;
79045+ ino_t inode;
79046+ dev_t device;
79047+ __u32 mode;
79048+
79049+ struct acl_subject_label *nested;
79050+ struct acl_object_label *globbed;
79051+
79052+ /* next two structures not used */
79053+
79054+ struct acl_object_label *prev;
79055+ struct acl_object_label *next;
79056+};
79057+
79058+struct acl_ip_label {
79059+ char *iface;
79060+ __u32 addr;
79061+ __u32 netmask;
79062+ __u16 low, high;
79063+ __u8 mode;
79064+ __u32 type;
79065+ __u32 proto[8];
79066+
79067+ /* next two structures not used */
79068+
79069+ struct acl_ip_label *prev;
79070+ struct acl_ip_label *next;
79071+};
79072+
79073+struct gr_arg {
79074+ struct user_acl_role_db role_db;
79075+ unsigned char pw[GR_PW_LEN];
79076+ unsigned char salt[GR_SALT_LEN];
79077+ unsigned char sum[GR_SHA_LEN];
79078+ unsigned char sp_role[GR_SPROLE_LEN];
79079+ struct sprole_pw *sprole_pws;
79080+ dev_t segv_device;
79081+ ino_t segv_inode;
79082+ uid_t segv_uid;
79083+ __u16 num_sprole_pws;
79084+ __u16 mode;
79085+};
79086+
79087+struct gr_arg_wrapper {
79088+ struct gr_arg *arg;
79089+ __u32 version;
79090+ __u32 size;
79091+};
79092+
79093+struct subject_map {
79094+ struct acl_subject_label *user;
79095+ struct acl_subject_label *kernel;
79096+ struct subject_map *prev;
79097+ struct subject_map *next;
79098+};
79099+
79100+struct acl_subj_map_db {
79101+ struct subject_map **s_hash;
79102+ __u32 s_size;
79103+};
79104+
79105+struct gr_policy_state {
79106+ struct sprole_pw **acl_special_roles;
79107+ __u16 num_sprole_pws;
79108+ struct acl_role_label *kernel_role;
79109+ struct acl_role_label *role_list;
79110+ struct acl_role_label *default_role;
79111+ struct acl_role_db acl_role_set;
79112+ struct acl_subj_map_db subj_map_set;
79113+ struct name_db name_set;
79114+ struct inodev_db inodev_set;
79115+};
79116+
79117+struct gr_alloc_state {
79118+ unsigned long alloc_stack_next;
79119+ unsigned long alloc_stack_size;
79120+ void **alloc_stack;
79121+};
79122+
79123+struct gr_reload_state {
79124+ struct gr_policy_state oldpolicy;
79125+ struct gr_alloc_state oldalloc;
79126+ struct gr_policy_state newpolicy;
79127+ struct gr_alloc_state newalloc;
79128+ struct gr_policy_state *oldpolicy_ptr;
79129+ struct gr_alloc_state *oldalloc_ptr;
79130+ unsigned char oldmode;
79131+};
79132+
79133+/* End Data Structures Section */
79134+
79135+/* Hash functions generated by empirical testing by Brad Spengler
79136+ Makes good use of the low bits of the inode. Generally 0-1 times
79137+ in loop for successful match. 0-3 for unsuccessful match.
79138+ Shift/add algorithm with modulus of table size and an XOR*/
79139+
79140+static __inline__ unsigned int
79141+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
79142+{
79143+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
79144+}
79145+
79146+static __inline__ unsigned int
79147+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
79148+{
79149+ return ((const unsigned long)userp % sz);
79150+}
79151+
79152+static __inline__ unsigned int
79153+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
79154+{
79155+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
79156+}
79157+
79158+static __inline__ unsigned int
79159+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
79160+{
79161+ return full_name_hash((const unsigned char *)name, len) % sz;
79162+}
79163+
79164+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
79165+ subj = NULL; \
79166+ iter = 0; \
79167+ while (iter < role->subj_hash_size) { \
79168+ if (subj == NULL) \
79169+ subj = role->subj_hash[iter]; \
79170+ if (subj == NULL) { \
79171+ iter++; \
79172+ continue; \
79173+ }
79174+
79175+#define FOR_EACH_SUBJECT_END(subj,iter) \
79176+ subj = subj->next; \
79177+ if (subj == NULL) \
79178+ iter++; \
79179+ }
79180+
79181+
79182+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
79183+ subj = role->hash->first; \
79184+ while (subj != NULL) {
79185+
79186+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
79187+ subj = subj->next; \
79188+ }
79189+
79190+#endif
79191+
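
The four gr_*hash() helpers near the end of gracl.h map role, subject, inode/device, and name keys onto the RBAC hash tables; the comment above them records the measured probe counts. They are ordinary shift/XOR-then-modulus hashes, so their spread is easy to eyeball outside the kernel. A small harness running gr_fhash over a run of consecutive inode numbers, with ino_t/dev_t stood in for by unsigned long and an arbitrary table size:

#include <stdio.h>

static unsigned int gr_fhash(unsigned long ino, unsigned long dev,
                             unsigned int sz)
{
        return (unsigned int)(((ino + dev) ^
                ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        unsigned int sz = 16, buckets[16] = { 0 };

        for (unsigned long ino = 1000; ino < 1064; ino++)
                buckets[gr_fhash(ino, 0x801, sz)]++;

        for (unsigned int i = 0; i < sz; i++)
                printf("bucket %2u: %u\n", i, buckets[i]);
        return 0;
}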
79192diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
79193new file mode 100644
79194index 0000000..33ebd1f
79195--- /dev/null
79196+++ b/include/linux/gracl_compat.h
79197@@ -0,0 +1,156 @@
79198+#ifndef GR_ACL_COMPAT_H
79199+#define GR_ACL_COMPAT_H
79200+
79201+#include <linux/resource.h>
79202+#include <asm/resource.h>
79203+
79204+struct sprole_pw_compat {
79205+ compat_uptr_t rolename;
79206+ unsigned char salt[GR_SALT_LEN];
79207+ unsigned char sum[GR_SHA_LEN];
79208+};
79209+
79210+struct gr_hash_struct_compat {
79211+ compat_uptr_t table;
79212+ compat_uptr_t nametable;
79213+ compat_uptr_t first;
79214+ __u32 table_size;
79215+ __u32 used_size;
79216+ int type;
79217+};
79218+
79219+struct acl_subject_label_compat {
79220+ compat_uptr_t filename;
79221+ compat_ino_t inode;
79222+ __u32 device;
79223+ __u32 mode;
79224+ kernel_cap_t cap_mask;
79225+ kernel_cap_t cap_lower;
79226+ kernel_cap_t cap_invert_audit;
79227+
79228+ struct compat_rlimit res[GR_NLIMITS];
79229+ __u32 resmask;
79230+
79231+ __u8 user_trans_type;
79232+ __u8 group_trans_type;
79233+ compat_uptr_t user_transitions;
79234+ compat_uptr_t group_transitions;
79235+ __u16 user_trans_num;
79236+ __u16 group_trans_num;
79237+
79238+ __u32 sock_families[2];
79239+ __u32 ip_proto[8];
79240+ __u32 ip_type;
79241+ compat_uptr_t ips;
79242+ __u32 ip_num;
79243+ __u32 inaddr_any_override;
79244+
79245+ __u32 crashes;
79246+ compat_ulong_t expires;
79247+
79248+ compat_uptr_t parent_subject;
79249+ compat_uptr_t hash;
79250+ compat_uptr_t prev;
79251+ compat_uptr_t next;
79252+
79253+ compat_uptr_t obj_hash;
79254+ __u32 obj_hash_size;
79255+ __u16 pax_flags;
79256+};
79257+
79258+struct role_allowed_ip_compat {
79259+ __u32 addr;
79260+ __u32 netmask;
79261+
79262+ compat_uptr_t prev;
79263+ compat_uptr_t next;
79264+};
79265+
79266+struct role_transition_compat {
79267+ compat_uptr_t rolename;
79268+
79269+ compat_uptr_t prev;
79270+ compat_uptr_t next;
79271+};
79272+
79273+struct acl_role_label_compat {
79274+ compat_uptr_t rolename;
79275+ uid_t uidgid;
79276+ __u16 roletype;
79277+
79278+ __u16 auth_attempts;
79279+ compat_ulong_t expires;
79280+
79281+ compat_uptr_t root_label;
79282+ compat_uptr_t hash;
79283+
79284+ compat_uptr_t prev;
79285+ compat_uptr_t next;
79286+
79287+ compat_uptr_t transitions;
79288+ compat_uptr_t allowed_ips;
79289+ compat_uptr_t domain_children;
79290+ __u16 domain_child_num;
79291+
79292+ umode_t umask;
79293+
79294+ compat_uptr_t subj_hash;
79295+ __u32 subj_hash_size;
79296+};
79297+
79298+struct user_acl_role_db_compat {
79299+ compat_uptr_t r_table;
79300+ __u32 num_pointers;
79301+ __u32 num_roles;
79302+ __u32 num_domain_children;
79303+ __u32 num_subjects;
79304+ __u32 num_objects;
79305+};
79306+
79307+struct acl_object_label_compat {
79308+ compat_uptr_t filename;
79309+ compat_ino_t inode;
79310+ __u32 device;
79311+ __u32 mode;
79312+
79313+ compat_uptr_t nested;
79314+ compat_uptr_t globbed;
79315+
79316+ compat_uptr_t prev;
79317+ compat_uptr_t next;
79318+};
79319+
79320+struct acl_ip_label_compat {
79321+ compat_uptr_t iface;
79322+ __u32 addr;
79323+ __u32 netmask;
79324+ __u16 low, high;
79325+ __u8 mode;
79326+ __u32 type;
79327+ __u32 proto[8];
79328+
79329+ compat_uptr_t prev;
79330+ compat_uptr_t next;
79331+};
79332+
79333+struct gr_arg_compat {
79334+ struct user_acl_role_db_compat role_db;
79335+ unsigned char pw[GR_PW_LEN];
79336+ unsigned char salt[GR_SALT_LEN];
79337+ unsigned char sum[GR_SHA_LEN];
79338+ unsigned char sp_role[GR_SPROLE_LEN];
79339+ compat_uptr_t sprole_pws;
79340+ __u32 segv_device;
79341+ compat_ino_t segv_inode;
79342+ uid_t segv_uid;
79343+ __u16 num_sprole_pws;
79344+ __u16 mode;
79345+};
79346+
79347+struct gr_arg_wrapper_compat {
79348+ compat_uptr_t arg;
79349+ __u32 version;
79350+ __u32 size;
79351+};
79352+
79353+#endif
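
gracl_compat.h mirrors every userspace-facing gracl.h structure with pointers replaced by compat_uptr_t (a 32-bit handle) and longs by compat_ulong_t, so a 32-bit gradm can feed policy to a 64-bit kernel; the kernel side widens each handle back with compat_ptr(). A sketch of that marshalling with the compat types mocked as fixed-width integers (in the kernel these are the real compat_uptr_t and compat_ptr()):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

struct wrapper_compat { compat_uptr_t arg; uint32_t version, size; };
struct wrapper        { void *arg;         uint32_t version, size; };

static void *compat_ptr(compat_uptr_t uptr)
{
        return (void *)(uintptr_t)uptr;
}

int main(void)
{
        struct wrapper_compat in = { .arg = 0x1000, .version = 1, .size = 24 };
        struct wrapper out = {
                .arg     = compat_ptr(in.arg),
                .version = in.version,
                .size    = in.size,
        };

        printf("sizeof compat=%zu native=%zu arg=%p\n",
               sizeof in, sizeof out, out.arg);
        return 0;
}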
79354diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
79355new file mode 100644
79356index 0000000..323ecf2
79357--- /dev/null
79358+++ b/include/linux/gralloc.h
79359@@ -0,0 +1,9 @@
79360+#ifndef __GRALLOC_H
79361+#define __GRALLOC_H
79362+
79363+void acl_free_all(void);
79364+int acl_alloc_stack_init(unsigned long size);
79365+void *acl_alloc(unsigned long len);
79366+void *acl_alloc_num(unsigned long num, unsigned long len);
79367+
79368+#endif
79369diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
79370new file mode 100644
79371index 0000000..be66033
79372--- /dev/null
79373+++ b/include/linux/grdefs.h
79374@@ -0,0 +1,140 @@
79375+#ifndef GRDEFS_H
79376+#define GRDEFS_H
79377+
79378+/* Begin grsecurity status declarations */
79379+
79380+enum {
79381+ GR_READY = 0x01,
79382+ GR_STATUS_INIT = 0x00 // disabled state
79383+};
79384+
79385+/* Begin ACL declarations */
79386+
79387+/* Role flags */
79388+
79389+enum {
79390+ GR_ROLE_USER = 0x0001,
79391+ GR_ROLE_GROUP = 0x0002,
79392+ GR_ROLE_DEFAULT = 0x0004,
79393+ GR_ROLE_SPECIAL = 0x0008,
79394+ GR_ROLE_AUTH = 0x0010,
79395+ GR_ROLE_NOPW = 0x0020,
79396+ GR_ROLE_GOD = 0x0040,
79397+ GR_ROLE_LEARN = 0x0080,
79398+ GR_ROLE_TPE = 0x0100,
79399+ GR_ROLE_DOMAIN = 0x0200,
79400+ GR_ROLE_PAM = 0x0400,
79401+ GR_ROLE_PERSIST = 0x0800
79402+};
79403+
79404+/* ACL Subject and Object mode flags */
79405+enum {
79406+ GR_DELETED = 0x80000000
79407+};
79408+
79409+/* ACL Object-only mode flags */
79410+enum {
79411+ GR_READ = 0x00000001,
79412+ GR_APPEND = 0x00000002,
79413+ GR_WRITE = 0x00000004,
79414+ GR_EXEC = 0x00000008,
79415+ GR_FIND = 0x00000010,
79416+ GR_INHERIT = 0x00000020,
79417+ GR_SETID = 0x00000040,
79418+ GR_CREATE = 0x00000080,
79419+ GR_DELETE = 0x00000100,
79420+ GR_LINK = 0x00000200,
79421+ GR_AUDIT_READ = 0x00000400,
79422+ GR_AUDIT_APPEND = 0x00000800,
79423+ GR_AUDIT_WRITE = 0x00001000,
79424+ GR_AUDIT_EXEC = 0x00002000,
79425+ GR_AUDIT_FIND = 0x00004000,
79426+ GR_AUDIT_INHERIT= 0x00008000,
79427+ GR_AUDIT_SETID = 0x00010000,
79428+ GR_AUDIT_CREATE = 0x00020000,
79429+ GR_AUDIT_DELETE = 0x00040000,
79430+ GR_AUDIT_LINK = 0x00080000,
79431+ GR_PTRACERD = 0x00100000,
79432+ GR_NOPTRACE = 0x00200000,
79433+ GR_SUPPRESS = 0x00400000,
79434+ GR_NOLEARN = 0x00800000,
79435+ GR_INIT_TRANSFER= 0x01000000
79436+};
79437+
79438+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
79439+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
79440+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
79441+
79442+/* ACL subject-only mode flags */
79443+enum {
79444+ GR_KILL = 0x00000001,
79445+ GR_VIEW = 0x00000002,
79446+ GR_PROTECTED = 0x00000004,
79447+ GR_LEARN = 0x00000008,
79448+ GR_OVERRIDE = 0x00000010,
79449+ /* just a placeholder, this mode is only used in userspace */
79450+ GR_DUMMY = 0x00000020,
79451+ GR_PROTSHM = 0x00000040,
79452+ GR_KILLPROC = 0x00000080,
79453+ GR_KILLIPPROC = 0x00000100,
79454+ /* just a placeholder, this mode is only used in userspace */
79455+ GR_NOTROJAN = 0x00000200,
79456+ GR_PROTPROCFD = 0x00000400,
79457+ GR_PROCACCT = 0x00000800,
79458+ GR_RELAXPTRACE = 0x00001000,
79459+ //GR_NESTED = 0x00002000,
79460+ GR_INHERITLEARN = 0x00004000,
79461+ GR_PROCFIND = 0x00008000,
79462+ GR_POVERRIDE = 0x00010000,
79463+ GR_KERNELAUTH = 0x00020000,
79464+ GR_ATSECURE = 0x00040000,
79465+ GR_SHMEXEC = 0x00080000
79466+};
79467+
79468+enum {
79469+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
79470+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
79471+ GR_PAX_ENABLE_MPROTECT = 0x0004,
79472+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
79473+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
79474+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
79475+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
79476+ GR_PAX_DISABLE_MPROTECT = 0x0400,
79477+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
79478+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
79479+};
79480+
79481+enum {
79482+ GR_ID_USER = 0x01,
79483+ GR_ID_GROUP = 0x02,
79484+};
79485+
79486+enum {
79487+ GR_ID_ALLOW = 0x01,
79488+ GR_ID_DENY = 0x02,
79489+};
79490+
79491+#define GR_CRASH_RES 31
79492+#define GR_UIDTABLE_MAX 500
79493+
79494+/* begin resource learning section */
79495+enum {
79496+ GR_RLIM_CPU_BUMP = 60,
79497+ GR_RLIM_FSIZE_BUMP = 50000,
79498+ GR_RLIM_DATA_BUMP = 10000,
79499+ GR_RLIM_STACK_BUMP = 1000,
79500+ GR_RLIM_CORE_BUMP = 10000,
79501+ GR_RLIM_RSS_BUMP = 500000,
79502+ GR_RLIM_NPROC_BUMP = 1,
79503+ GR_RLIM_NOFILE_BUMP = 5,
79504+ GR_RLIM_MEMLOCK_BUMP = 50000,
79505+ GR_RLIM_AS_BUMP = 500000,
79506+ GR_RLIM_LOCKS_BUMP = 2,
79507+ GR_RLIM_SIGPENDING_BUMP = 5,
79508+ GR_RLIM_MSGQUEUE_BUMP = 10000,
79509+ GR_RLIM_NICE_BUMP = 1,
79510+ GR_RLIM_RTPRIO_BUMP = 1,
79511+ GR_RLIM_RTTIME_BUMP = 1000000
79512+};
79513+
79514+#endif
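
Everything in grdefs.h is bit flags grouped into enums: role types, object modes, subject modes, PaX toggles. The GR_AUDITS mask shows the intended use, folding the per-operation audit bits into a single test. A compact demonstration, with the enum values copied from the header above and the mask abridged to two bits:

#include <stdio.h>

enum {
        GR_READ        = 0x00000001,
        GR_WRITE       = 0x00000004,
        GR_AUDIT_READ  = 0x00000400,
        GR_AUDIT_WRITE = 0x00001000,
};
#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE)  /* abridged mask */

int main(void)
{
        unsigned int mode = GR_READ | GR_AUDIT_READ;

        printf("readable: %d\n", !!(mode & GR_READ));
        printf("audited:  %d\n", !!(mode & GR_AUDITS));
        return 0;
}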
79515diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
79516new file mode 100644
79517index 0000000..d25522e
79518--- /dev/null
79519+++ b/include/linux/grinternal.h
79520@@ -0,0 +1,229 @@
79521+#ifndef __GRINTERNAL_H
79522+#define __GRINTERNAL_H
79523+
79524+#ifdef CONFIG_GRKERNSEC
79525+
79526+#include <linux/fs.h>
79527+#include <linux/mnt_namespace.h>
79528+#include <linux/nsproxy.h>
79529+#include <linux/gracl.h>
79530+#include <linux/grdefs.h>
79531+#include <linux/grmsg.h>
79532+
79533+void gr_add_learn_entry(const char *fmt, ...)
79534+ __attribute__ ((format (printf, 1, 2)));
79535+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
79536+ const struct vfsmount *mnt);
79537+__u32 gr_check_create(const struct dentry *new_dentry,
79538+ const struct dentry *parent,
79539+ const struct vfsmount *mnt, const __u32 mode);
79540+int gr_check_protected_task(const struct task_struct *task);
79541+__u32 to_gr_audit(const __u32 reqmode);
79542+int gr_set_acls(const int type);
79543+int gr_acl_is_enabled(void);
79544+char gr_roletype_to_char(void);
79545+
79546+void gr_handle_alertkill(struct task_struct *task);
79547+char *gr_to_filename(const struct dentry *dentry,
79548+ const struct vfsmount *mnt);
79549+char *gr_to_filename1(const struct dentry *dentry,
79550+ const struct vfsmount *mnt);
79551+char *gr_to_filename2(const struct dentry *dentry,
79552+ const struct vfsmount *mnt);
79553+char *gr_to_filename3(const struct dentry *dentry,
79554+ const struct vfsmount *mnt);
79555+
79556+extern int grsec_enable_ptrace_readexec;
79557+extern int grsec_enable_harden_ptrace;
79558+extern int grsec_enable_link;
79559+extern int grsec_enable_fifo;
79560+extern int grsec_enable_execve;
79561+extern int grsec_enable_shm;
79562+extern int grsec_enable_execlog;
79563+extern int grsec_enable_signal;
79564+extern int grsec_enable_audit_ptrace;
79565+extern int grsec_enable_forkfail;
79566+extern int grsec_enable_time;
79567+extern int grsec_enable_rofs;
79568+extern int grsec_deny_new_usb;
79569+extern int grsec_enable_chroot_shmat;
79570+extern int grsec_enable_chroot_mount;
79571+extern int grsec_enable_chroot_double;
79572+extern int grsec_enable_chroot_pivot;
79573+extern int grsec_enable_chroot_chdir;
79574+extern int grsec_enable_chroot_chmod;
79575+extern int grsec_enable_chroot_mknod;
79576+extern int grsec_enable_chroot_fchdir;
79577+extern int grsec_enable_chroot_nice;
79578+extern int grsec_enable_chroot_execlog;
79579+extern int grsec_enable_chroot_caps;
79580+extern int grsec_enable_chroot_sysctl;
79581+extern int grsec_enable_chroot_unix;
79582+extern int grsec_enable_symlinkown;
79583+extern kgid_t grsec_symlinkown_gid;
79584+extern int grsec_enable_tpe;
79585+extern kgid_t grsec_tpe_gid;
79586+extern int grsec_enable_tpe_all;
79587+extern int grsec_enable_tpe_invert;
79588+extern int grsec_enable_socket_all;
79589+extern kgid_t grsec_socket_all_gid;
79590+extern int grsec_enable_socket_client;
79591+extern kgid_t grsec_socket_client_gid;
79592+extern int grsec_enable_socket_server;
79593+extern kgid_t grsec_socket_server_gid;
79594+extern kgid_t grsec_audit_gid;
79595+extern int grsec_enable_group;
79596+extern int grsec_enable_log_rwxmaps;
79597+extern int grsec_enable_mount;
79598+extern int grsec_enable_chdir;
79599+extern int grsec_resource_logging;
79600+extern int grsec_enable_blackhole;
79601+extern int grsec_lastack_retries;
79602+extern int grsec_enable_brute;
79603+extern int grsec_enable_harden_ipc;
79604+extern int grsec_lock;
79605+
79606+extern spinlock_t grsec_alert_lock;
79607+extern unsigned long grsec_alert_wtime;
79608+extern unsigned long grsec_alert_fyet;
79609+
79610+extern spinlock_t grsec_audit_lock;
79611+
79612+extern rwlock_t grsec_exec_file_lock;
79613+
79614+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
79615+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
79616+ (tsk)->exec_file->f_path.mnt) : "/")
79617+
79618+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
79619+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
79620+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
79621+
79622+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
79623+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
79624+ (tsk)->exec_file->f_path.mnt) : "/")
79625+
79626+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
79627+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
79628+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
79629+
79630+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
79631+
79632+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
79633+
79634+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
79635+{
79636+ if (file1 && file2) {
79637+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
79638+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
79639+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
79640+ return true;
79641+ }
79642+
79643+ return false;
79644+}
79645+
79646+#define GR_CHROOT_CAPS {{ \
79647+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
79648+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
79649+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
79650+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
79651+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
79652+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
79653+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
79654+
79655+#define security_learn(normal_msg,args...) \
79656+({ \
79657+ read_lock(&grsec_exec_file_lock); \
79658+ gr_add_learn_entry(normal_msg "\n", ## args); \
79659+ read_unlock(&grsec_exec_file_lock); \
79660+})
79661+
79662+enum {
79663+ GR_DO_AUDIT,
79664+ GR_DONT_AUDIT,
79665+ /* used for non-audit messages that we shouldn't kill the task on */
79666+ GR_DONT_AUDIT_GOOD
79667+};
79668+
79669+enum {
79670+ GR_TTYSNIFF,
79671+ GR_RBAC,
79672+ GR_RBAC_STR,
79673+ GR_STR_RBAC,
79674+ GR_RBAC_MODE2,
79675+ GR_RBAC_MODE3,
79676+ GR_FILENAME,
79677+ GR_SYSCTL_HIDDEN,
79678+ GR_NOARGS,
79679+ GR_ONE_INT,
79680+ GR_ONE_INT_TWO_STR,
79681+ GR_ONE_STR,
79682+ GR_STR_INT,
79683+ GR_TWO_STR_INT,
79684+ GR_TWO_INT,
79685+ GR_TWO_U64,
79686+ GR_THREE_INT,
79687+ GR_FIVE_INT_TWO_STR,
79688+ GR_TWO_STR,
79689+ GR_THREE_STR,
79690+ GR_FOUR_STR,
79691+ GR_STR_FILENAME,
79692+ GR_FILENAME_STR,
79693+ GR_FILENAME_TWO_INT,
79694+ GR_FILENAME_TWO_INT_STR,
79695+ GR_TEXTREL,
79696+ GR_PTRACE,
79697+ GR_RESOURCE,
79698+ GR_CAP,
79699+ GR_SIG,
79700+ GR_SIG2,
79701+ GR_CRASH1,
79702+ GR_CRASH2,
79703+ GR_PSACCT,
79704+ GR_RWXMAP,
79705+ GR_RWXMAPVMA
79706+};
79707+
79708+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
79709+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
79710+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
79711+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
79712+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
79713+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
79714+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
79715+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
79716+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
79717+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
79718+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
79719+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
79720+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
79721+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
79722+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
79723+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
79724+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
79725+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
79726+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
79727+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
79728+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
79729+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
79730+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
79731+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
79732+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
79733+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
79734+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
79735+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
79736+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
79737+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
79738+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
79739+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
79740+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
79741+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
79742+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
79743+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
79744+
79745+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
79746+
79747+#endif
79748+
79749+#endif
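
GR_CHROOT_CAPS above is an initializer for the kernel's two-word kernel_cap_t: each CAP_TO_MASK(n) sets bit n%32 in the word selected by n/32, and the listed capabilities are the ones grsecurity strips from chrooted tasks (note CAP_SYSLOG and CAP_MAC_ADMIN land in the second word). The mask construction, reproduced with three capability numbers taken from include/uapi/linux/capability.h and the kernel's CAP_TO_MASK definition:

#include <stdio.h>

#define CAP_SYS_MODULE 16
#define CAP_SYS_ADMIN  21
#define CAP_SYSLOG     34                      /* lands in word 1 */

#define CAP_TO_INDEX(x) ((x) >> 5)
#define CAP_TO_MASK(x)  (1u << ((x) & 31))

int main(void)
{
        unsigned int set[2] = { 0 };

        set[CAP_TO_INDEX(CAP_SYS_MODULE)] |= CAP_TO_MASK(CAP_SYS_MODULE);
        set[CAP_TO_INDEX(CAP_SYS_ADMIN)]  |= CAP_TO_MASK(CAP_SYS_ADMIN);
        set[CAP_TO_INDEX(CAP_SYSLOG)]     |= CAP_TO_MASK(CAP_SYSLOG);

        printf("word0=%#x word1=%#x\n", set[0], set[1]);
        return 0;
}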
79750diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
79751new file mode 100644
79752index 0000000..ba93581
79753--- /dev/null
79754+++ b/include/linux/grmsg.h
79755@@ -0,0 +1,116 @@
79756+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
79757+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
79758+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
79759+#define GR_STOPMOD_MSG "denied modification of module state by "
79760+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
79761+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
79762+#define GR_IOPERM_MSG "denied use of ioperm() by "
79763+#define GR_IOPL_MSG "denied use of iopl() by "
79764+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
79765+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
79766+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
79767+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
79768+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
79769+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
79770+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
79771+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
79772+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
79773+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
79774+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
79775+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
79776+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
79777+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
79778+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
79779+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
79780+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
79781+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
79782+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
79783+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
79784+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
79785+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
79786+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
79787+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
79788+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
79789+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
79790+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
79791+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
79792+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
79793+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
79794+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
79795+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
79796+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
79797+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
79798+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
79799+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
79800+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
79801+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
79802+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
79803+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
79804+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
79805+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
79806+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
79807+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
79808+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
79809+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
79810+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
79811+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
79812+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
79813+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
79814+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
79815+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
79816+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
79817+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
79818+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
79819+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
79820+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
79821+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
79822+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
79823+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
79824+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
79825+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
79826+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
79827+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
79828+#define GR_FAILFORK_MSG "failed fork with errno %s by "
79829+#define GR_NICE_CHROOT_MSG "denied priority change by "
79830+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
79831+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
79832+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
79833+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
79834+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
79835+#define GR_TIME_MSG "time set by "
79836+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
79837+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
79838+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
79839+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
79840+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
79841+#define GR_BIND_MSG "denied bind() by "
79842+#define GR_CONNECT_MSG "denied connect() by "
79843+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
79844+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
79845+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
79846+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
79847+#define GR_CAP_ACL_MSG "use of %s denied for "
79848+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
79849+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
79850+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
79851+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
79852+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
79853+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
79854+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
79855+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
79856+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
79857+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
79858+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
79859+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
79860+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
79861+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
79862+#define GR_VM86_MSG "denied use of vm86 by "
79863+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
79864+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
79865+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
79866+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
79867+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
79868+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
79869+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
79870+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
79871+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
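Editor's note: nearly every message above bounds its %s conversions with an explicit precision (%.950s, %.16s, %.4095s, ...) so an attacker-influenced path or comm string cannot blow out the fixed-size log line. A two-line userspace illustration of that printf precision behavior:

```c
/* printf precision on %s caps how many bytes are printed, which is
 * why the grsecurity messages use %.950s, %.16s, etc. */
#include <stdio.h>

int main(void)
{
	const char *comm = "averylongprocessname";

	printf("[%.16s]\n", comm);  /* at most 16 chars: [averylongprocess] */
	printf("[%.4s]\n", comm);   /* prints: [aver] */
	return 0;
}
```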
79872diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
79873new file mode 100644
79874index 0000000..f2d8c6c
79875--- /dev/null
79876+++ b/include/linux/grsecurity.h
79877@@ -0,0 +1,248 @@
79878+#ifndef GR_SECURITY_H
79879+#define GR_SECURITY_H
79880+#include <linux/fs.h>
79881+#include <linux/fs_struct.h>
79882+#include <linux/binfmts.h>
79883+#include <linux/gracl.h>
79884+
79885+/* notify of brain-dead configs */
79886+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79887+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
79888+#endif
79889+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
79890+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
79891+#endif
79892+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
79893+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
79894+#endif
79895+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
79896+#error "CONFIG_PAX enabled, but no PaX options are enabled."
79897+#endif
79898+
79899+int gr_handle_new_usb(void);
79900+
79901+void gr_handle_brute_attach(int dumpable);
79902+void gr_handle_brute_check(void);
79903+void gr_handle_kernel_exploit(void);
79904+
79905+char gr_roletype_to_char(void);
79906+
79907+int gr_proc_is_restricted(void);
79908+
79909+int gr_acl_enable_at_secure(void);
79910+
79911+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
79912+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
79913+
79914+void gr_del_task_from_ip_table(struct task_struct *p);
79915+
79916+int gr_pid_is_chrooted(struct task_struct *p);
79917+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
79918+int gr_handle_chroot_nice(void);
79919+int gr_handle_chroot_sysctl(const int op);
79920+int gr_handle_chroot_setpriority(struct task_struct *p,
79921+ const int niceval);
79922+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
79923+int gr_handle_chroot_chroot(const struct dentry *dentry,
79924+ const struct vfsmount *mnt);
79925+void gr_handle_chroot_chdir(const struct path *path);
79926+int gr_handle_chroot_chmod(const struct dentry *dentry,
79927+ const struct vfsmount *mnt, const int mode);
79928+int gr_handle_chroot_mknod(const struct dentry *dentry,
79929+ const struct vfsmount *mnt, const int mode);
79930+int gr_handle_chroot_mount(const struct dentry *dentry,
79931+ const struct vfsmount *mnt,
79932+ const char *dev_name);
79933+int gr_handle_chroot_pivot(void);
79934+int gr_handle_chroot_unix(const pid_t pid);
79935+
79936+int gr_handle_rawio(const struct inode *inode);
79937+
79938+void gr_handle_ioperm(void);
79939+void gr_handle_iopl(void);
79940+void gr_handle_msr_write(void);
79941+
79942+umode_t gr_acl_umask(void);
79943+
79944+int gr_tpe_allow(const struct file *file);
79945+
79946+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
79947+void gr_clear_chroot_entries(struct task_struct *task);
79948+
79949+void gr_log_forkfail(const int retval);
79950+void gr_log_timechange(void);
79951+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
79952+void gr_log_chdir(const struct dentry *dentry,
79953+ const struct vfsmount *mnt);
79954+void gr_log_chroot_exec(const struct dentry *dentry,
79955+ const struct vfsmount *mnt);
79956+void gr_log_remount(const char *devname, const int retval);
79957+void gr_log_unmount(const char *devname, const int retval);
79958+void gr_log_mount(const char *from, const char *to, const int retval);
79959+void gr_log_textrel(struct vm_area_struct *vma);
79960+void gr_log_ptgnustack(struct file *file);
79961+void gr_log_rwxmmap(struct file *file);
79962+void gr_log_rwxmprotect(struct vm_area_struct *vma);
79963+
79964+int gr_handle_follow_link(const struct inode *parent,
79965+ const struct inode *inode,
79966+ const struct dentry *dentry,
79967+ const struct vfsmount *mnt);
79968+int gr_handle_fifo(const struct dentry *dentry,
79969+ const struct vfsmount *mnt,
79970+ const struct dentry *dir, const int flag,
79971+ const int acc_mode);
79972+int gr_handle_hardlink(const struct dentry *dentry,
79973+ const struct vfsmount *mnt,
79974+ struct inode *inode,
79975+ const int mode, const struct filename *to);
79976+
79977+int gr_is_capable(const int cap);
79978+int gr_is_capable_nolog(const int cap);
79979+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
79980+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
79981+
79982+void gr_copy_label(struct task_struct *tsk);
79983+void gr_handle_crash(struct task_struct *task, const int sig);
79984+int gr_handle_signal(const struct task_struct *p, const int sig);
79985+int gr_check_crash_uid(const kuid_t uid);
79986+int gr_check_protected_task(const struct task_struct *task);
79987+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
79988+int gr_acl_handle_mmap(const struct file *file,
79989+ const unsigned long prot);
79990+int gr_acl_handle_mprotect(const struct file *file,
79991+ const unsigned long prot);
79992+int gr_check_hidden_task(const struct task_struct *tsk);
79993+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
79994+ const struct vfsmount *mnt);
79995+__u32 gr_acl_handle_utime(const struct dentry *dentry,
79996+ const struct vfsmount *mnt);
79997+__u32 gr_acl_handle_access(const struct dentry *dentry,
79998+ const struct vfsmount *mnt, const int fmode);
79999+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
80000+ const struct vfsmount *mnt, umode_t *mode);
80001+__u32 gr_acl_handle_chown(const struct dentry *dentry,
80002+ const struct vfsmount *mnt);
80003+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
80004+ const struct vfsmount *mnt);
80005+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
80006+ const struct vfsmount *mnt);
80007+int gr_handle_ptrace(struct task_struct *task, const long request);
80008+int gr_handle_proc_ptrace(struct task_struct *task);
80009+__u32 gr_acl_handle_execve(const struct dentry *dentry,
80010+ const struct vfsmount *mnt);
80011+int gr_check_crash_exec(const struct file *filp);
80012+int gr_acl_is_enabled(void);
80013+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
80014+ const kgid_t gid);
80015+int gr_set_proc_label(const struct dentry *dentry,
80016+ const struct vfsmount *mnt,
80017+ const int unsafe_flags);
80018+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
80019+ const struct vfsmount *mnt);
80020+__u32 gr_acl_handle_open(const struct dentry *dentry,
80021+ const struct vfsmount *mnt, int acc_mode);
80022+__u32 gr_acl_handle_creat(const struct dentry *dentry,
80023+ const struct dentry *p_dentry,
80024+ const struct vfsmount *p_mnt,
80025+ int open_flags, int acc_mode, const int imode);
80026+void gr_handle_create(const struct dentry *dentry,
80027+ const struct vfsmount *mnt);
80028+void gr_handle_proc_create(const struct dentry *dentry,
80029+ const struct inode *inode);
80030+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
80031+ const struct dentry *parent_dentry,
80032+ const struct vfsmount *parent_mnt,
80033+ const int mode);
80034+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
80035+ const struct dentry *parent_dentry,
80036+ const struct vfsmount *parent_mnt);
80037+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
80038+ const struct vfsmount *mnt);
80039+void gr_handle_delete(const ino_t ino, const dev_t dev);
80040+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
80041+ const struct vfsmount *mnt);
80042+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
80043+ const struct dentry *parent_dentry,
80044+ const struct vfsmount *parent_mnt,
80045+ const struct filename *from);
80046+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
80047+ const struct dentry *parent_dentry,
80048+ const struct vfsmount *parent_mnt,
80049+ const struct dentry *old_dentry,
80050+ const struct vfsmount *old_mnt, const struct filename *to);
80051+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
80052+int gr_acl_handle_rename(struct dentry *new_dentry,
80053+ struct dentry *parent_dentry,
80054+ const struct vfsmount *parent_mnt,
80055+ struct dentry *old_dentry,
80056+ struct inode *old_parent_inode,
80057+ struct vfsmount *old_mnt, const struct filename *newname);
80058+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
80059+ struct dentry *old_dentry,
80060+ struct dentry *new_dentry,
80061+ struct vfsmount *mnt, const __u8 replace);
80062+__u32 gr_check_link(const struct dentry *new_dentry,
80063+ const struct dentry *parent_dentry,
80064+ const struct vfsmount *parent_mnt,
80065+ const struct dentry *old_dentry,
80066+ const struct vfsmount *old_mnt);
80067+int gr_acl_handle_filldir(const struct file *file, const char *name,
80068+ const unsigned int namelen, const ino_t ino);
80069+
80070+__u32 gr_acl_handle_unix(const struct dentry *dentry,
80071+ const struct vfsmount *mnt);
80072+void gr_acl_handle_exit(void);
80073+void gr_acl_handle_psacct(struct task_struct *task, const long code);
80074+int gr_acl_handle_procpidmem(const struct task_struct *task);
80075+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
80076+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
80077+void gr_audit_ptrace(struct task_struct *task);
80078+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
80079+void gr_put_exec_file(struct task_struct *task);
80080+
80081+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
80082+
80083+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
80084+extern void gr_learn_resource(const struct task_struct *task, const int res,
80085+ const unsigned long wanted, const int gt);
80086+#else
80087+static inline void gr_learn_resource(const struct task_struct *task, const int res,
80088+ const unsigned long wanted, const int gt)
80089+{
80090+}
80091+#endif
80092+
80093+#ifdef CONFIG_GRKERNSEC_RESLOG
80094+extern void gr_log_resource(const struct task_struct *task, const int res,
80095+ const unsigned long wanted, const int gt);
80096+#else
80097+static inline void gr_log_resource(const struct task_struct *task, const int res,
80098+ const unsigned long wanted, const int gt)
80099+{
80100+}
80101+#endif
80102+
80103+#ifdef CONFIG_GRKERNSEC
80104+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
80105+void gr_handle_vm86(void);
80106+void gr_handle_mem_readwrite(u64 from, u64 to);
80107+
80108+void gr_log_badprocpid(const char *entry);
80109+
80110+extern int grsec_enable_dmesg;
80111+extern int grsec_disable_privio;
80112+
80113+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80114+extern kgid_t grsec_proc_gid;
80115+#endif
80116+
80117+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80118+extern int grsec_enable_chroot_findtask;
80119+#endif
80120+#ifdef CONFIG_GRKERNSEC_SETXID
80121+extern int grsec_enable_setxid;
80122+#endif
80123+#endif
80124+
80125+#endif
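Editor's note: grsecurity.h uses the standard kernel idiom for optional hooks, visible in the gr_learn_resource()/gr_log_resource() pairs above: with the config option enabled, the function is an extern with a real body elsewhere; with it disabled, it collapses to an empty static inline, so call sites need no #ifdefs and the compiler drops the call entirely. A userspace sketch of the same pattern, where FEATURE_X stands in for a Kconfig symbol:

```c
/* Sketch of the extern-or-empty-stub idiom used for gr_learn_resource()
 * and gr_log_resource() above. FEATURE_X stands in for a Kconfig option. */
#include <stdio.h>

#ifdef FEATURE_X
extern void audit_event(const char *what); /* real body in another TU */
#else
static inline void audit_event(const char *what) { (void)what; }
#endif

int main(void)
{
	/* Callers stay #ifdef-free; with FEATURE_X unset this call
	 * compiles away to nothing. */
	audit_event("resource limit exceeded");
	printf("done\n");
	return 0;
}
```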
80126diff --git a/include/linux/grsock.h b/include/linux/grsock.h
80127new file mode 100644
80128index 0000000..e7ffaaf
80129--- /dev/null
80130+++ b/include/linux/grsock.h
80131@@ -0,0 +1,19 @@
80132+#ifndef __GRSOCK_H
80133+#define __GRSOCK_H
80134+
80135+extern void gr_attach_curr_ip(const struct sock *sk);
80136+extern int gr_handle_sock_all(const int family, const int type,
80137+ const int protocol);
80138+extern int gr_handle_sock_server(const struct sockaddr *sck);
80139+extern int gr_handle_sock_server_other(const struct sock *sck);
80140+extern int gr_handle_sock_client(const struct sockaddr *sck);
80141+extern int gr_search_connect(struct socket * sock,
80142+ struct sockaddr_in * addr);
80143+extern int gr_search_bind(struct socket * sock,
80144+ struct sockaddr_in * addr);
80145+extern int gr_search_listen(struct socket * sock);
80146+extern int gr_search_accept(struct socket * sock);
80147+extern int gr_search_socket(const int domain, const int type,
80148+ const int protocol);
80149+
80150+#endif
80151diff --git a/include/linux/hash.h b/include/linux/hash.h
80152index bd1754c..8240892 100644
80153--- a/include/linux/hash.h
80154+++ b/include/linux/hash.h
80155@@ -83,7 +83,7 @@ static inline u32 hash32_ptr(const void *ptr)
80156 struct fast_hash_ops {
80157 u32 (*hash)(const void *data, u32 len, u32 seed);
80158 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
80159-};
80160+} __no_const;
80161
80162 /**
80163  * arch_fast_hash - Calculates a hash over a given buffer that can have
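Editor's note: most ops structures in this patch gain __do_const so the constification plugin can place them in read-only memory; fast_hash_ops instead gets __no_const, presumably because its pointers are assigned at runtime. The hardening idea behind constified function-pointer tables can be shown in plain C with const:

```c
/* Userspace sketch of the constified-ops-table idea behind __do_const:
 * a const struct of function pointers lands in .rodata, so the
 * pointers cannot be redirected at runtime. */
#include <stdio.h>

struct hash_ops {
	unsigned (*hash)(const void *data, unsigned len, unsigned seed);
};

static unsigned djb2(const void *data, unsigned len, unsigned seed)
{
	const unsigned char *p = data;
	unsigned h = seed ? seed : 5381;

	while (len--)
		h = h * 33 + *p++;
	return h;
}

/* const: writing ops.hash after init is a compile error, and the
 * table itself sits in read-only memory. */
static const struct hash_ops ops = { .hash = djb2 };

int main(void)
{
	printf("%u\n", ops.hash("buffer", 6, 0));
	return 0;
}
```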
80164diff --git a/include/linux/highmem.h b/include/linux/highmem.h
80165index 7fb31da..08b5114 100644
80166--- a/include/linux/highmem.h
80167+++ b/include/linux/highmem.h
80168@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
80169 kunmap_atomic(kaddr);
80170 }
80171
80172+static inline void sanitize_highpage(struct page *page)
80173+{
80174+ void *kaddr;
80175+ unsigned long flags;
80176+
80177+ local_irq_save(flags);
80178+ kaddr = kmap_atomic(page);
80179+ clear_page(kaddr);
80180+ kunmap_atomic(kaddr);
80181+ local_irq_restore(flags);
80182+}
80183+
80184 static inline void zero_user_segments(struct page *page,
80185 unsigned start1, unsigned end1,
80186 unsigned start2, unsigned end2)
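Editor's note: sanitize_highpage() zeroes a page through a temporary atomic mapping with interrupts disabled, so freed page contents cannot leak to the next user of that page. The kernel specifics (kmap_atomic, IRQ masking) have no userspace equivalent, but a loose analog of the zero-on-free idea looks like this:

```c
/* Loose userspace analog of the sanitize-on-free idea behind
 * sanitize_highpage(): scrub memory before releasing it so stale
 * secrets cannot leak to the next user of that memory. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void sanitized_free(void *p, size_t len)
{
	/* explicit_bzero()/memset_explicit() would resist dead-store
	 * elimination; plain memset keeps this sketch portable. */
	memset(p, 0, len);
	free(p);
}

int main(void)
{
	char *secret = malloc(32);

	if (!secret)
		return 1;
	strcpy(secret, "hunter2");
	sanitized_free(secret, 32);
	puts("freed sanitized buffer");
	return 0;
}
```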
80187diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
80188index 1c7b89a..7dda400 100644
80189--- a/include/linux/hwmon-sysfs.h
80190+++ b/include/linux/hwmon-sysfs.h
80191@@ -25,7 +25,8 @@
80192 struct sensor_device_attribute{
80193 struct device_attribute dev_attr;
80194 int index;
80195-};
80196+} __do_const;
80197+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
80198 #define to_sensor_dev_attr(_dev_attr) \
80199 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
80200
80201@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
80202 struct device_attribute dev_attr;
80203 u8 index;
80204 u8 nr;
80205-};
80206+} __do_const;
80207+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
80208 #define to_sensor_dev_attr_2(_dev_attr) \
80209 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
80210
80211diff --git a/include/linux/i2c.h b/include/linux/i2c.h
80212index deddeb8..bcaf62d 100644
80213--- a/include/linux/i2c.h
80214+++ b/include/linux/i2c.h
80215@@ -378,6 +378,7 @@ struct i2c_algorithm {
80216 /* To determine what the adapter supports */
80217 u32 (*functionality) (struct i2c_adapter *);
80218 };
80219+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
80220
80221 /**
80222 * struct i2c_bus_recovery_info - I2C bus recovery information
80223diff --git a/include/linux/i2o.h b/include/linux/i2o.h
80224index d23c3c2..eb63c81 100644
80225--- a/include/linux/i2o.h
80226+++ b/include/linux/i2o.h
80227@@ -565,7 +565,7 @@ struct i2o_controller {
80228 struct i2o_device *exec; /* Executive */
80229 #if BITS_PER_LONG == 64
80230 spinlock_t context_list_lock; /* lock for context_list */
80231- atomic_t context_list_counter; /* needed for unique contexts */
80232+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
80233 struct list_head context_list; /* list of context id's
80234 and pointers */
80235 #endif
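Editor's note: context_list_counter only has to hand out unique-ish context IDs, so wraparound is harmless by design; switching it to atomic_unchecked_t opts it out of the overflow/refcount instrumentation PaX applies to ordinary atomic_t. The distinction in userspace C11 terms, where unsigned wraparound is well defined:

```c
/* Sketch of the atomic_unchecked_t idea: a counter whose wraparound is
 * benign by design (ID allocation), as opposed to a refcount where an
 * overflow would be a security bug worth trapping on. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_context_id; /* unsigned: wrap is well defined */

static unsigned alloc_context_id(void)
{
	return atomic_fetch_add(&next_context_id, 1);
}

int main(void)
{
	atomic_store(&next_context_id, 0xfffffffeu); /* force a wrap soon */
	for (int i = 0; i < 4; i++)
		printf("id = %u\n", alloc_context_id());
	return 0;
}
```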
80236diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
80237index aff7ad8..3942bbd 100644
80238--- a/include/linux/if_pppox.h
80239+++ b/include/linux/if_pppox.h
80240@@ -76,7 +76,7 @@ struct pppox_proto {
80241 int (*ioctl)(struct socket *sock, unsigned int cmd,
80242 unsigned long arg);
80243 struct module *owner;
80244-};
80245+} __do_const;
80246
80247 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
80248 extern void unregister_pppox_proto(int proto_num);
80249diff --git a/include/linux/init.h b/include/linux/init.h
80250index e168880..d9b489d 100644
80251--- a/include/linux/init.h
80252+++ b/include/linux/init.h
80253@@ -37,9 +37,17 @@
80254 * section.
80255 */
80256
80257+#define add_init_latent_entropy __latent_entropy
80258+
80259+#ifdef CONFIG_MEMORY_HOTPLUG
80260+#define add_meminit_latent_entropy
80261+#else
80262+#define add_meminit_latent_entropy __latent_entropy
80263+#endif
80264+
80265 /* These are for everybody (although not all archs will actually
80266 discard it in modules) */
80267-#define __init __section(.init.text) __cold notrace
80268+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
80269 #define __initdata __section(.init.data)
80270 #define __initconst __constsection(.init.rodata)
80271 #define __exitdata __section(.exit.data)
80272@@ -100,7 +108,7 @@
80273 #define __cpuexitconst
80274
80275 /* Used for MEMORY_HOTPLUG */
80276-#define __meminit __section(.meminit.text) __cold notrace
80277+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
80278 #define __meminitdata __section(.meminit.data)
80279 #define __meminitconst __constsection(.meminit.rodata)
80280 #define __memexit __section(.memexit.text) __exitused __cold notrace
80281diff --git a/include/linux/init_task.h b/include/linux/init_task.h
80282index 6df7f9f..d0bf699 100644
80283--- a/include/linux/init_task.h
80284+++ b/include/linux/init_task.h
80285@@ -156,6 +156,12 @@ extern struct task_group root_task_group;
80286
80287 #define INIT_TASK_COMM "swapper"
80288
80289+#ifdef CONFIG_X86
80290+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
80291+#else
80292+#define INIT_TASK_THREAD_INFO
80293+#endif
80294+
80295 #ifdef CONFIG_RT_MUTEXES
80296 # define INIT_RT_MUTEXES(tsk) \
80297 .pi_waiters = RB_ROOT, \
80298@@ -203,6 +209,7 @@ extern struct task_group root_task_group;
80299 RCU_POINTER_INITIALIZER(cred, &init_cred), \
80300 .comm = INIT_TASK_COMM, \
80301 .thread = INIT_THREAD, \
80302+ INIT_TASK_THREAD_INFO \
80303 .fs = &init_fs, \
80304 .files = &init_files, \
80305 .signal = &init_signals, \
80306diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
80307index 203c43d..605836b 100644
80308--- a/include/linux/interrupt.h
80309+++ b/include/linux/interrupt.h
80310@@ -411,8 +411,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
80311
80312 struct softirq_action
80313 {
80314- void (*action)(struct softirq_action *);
80315-};
80316+ void (*action)(void);
80317+} __no_const;
80318
80319 asmlinkage void do_softirq(void);
80320 asmlinkage void __do_softirq(void);
80321@@ -426,7 +426,7 @@ static inline void do_softirq_own_stack(void)
80322 }
80323 #endif
80324
80325-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
80326+extern void open_softirq(int nr, void (*action)(void));
80327 extern void softirq_init(void);
80328 extern void __raise_softirq_irqoff(unsigned int nr);
80329
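Editor's note: the hunk above drops the struct softirq_action * parameter, which handlers did not meaningfully use, so softirq actions become plain void (*)(void) registered in a fixed table. A userspace sketch of that open_softirq()/raise-then-run shape; all names here are illustrative:

```c
/* Userspace sketch of the softirq-style dispatch the hunk above
 * changes: handlers of type void (*)(void) in a fixed slot table,
 * raised as pending bits and run later. Names are illustrative. */
#include <stdio.h>

#define NR_SOFTIRQS 4

static void (*actions[NR_SOFTIRQS])(void);
static unsigned pending;

static void open_softirq(int nr, void (*action)(void)) { actions[nr] = action; }
static void raise_softirq(int nr) { pending |= 1u << nr; }

static void do_softirq(void)
{
	for (int nr = 0; nr < NR_SOFTIRQS; nr++)
		if (pending & (1u << nr)) {
			pending &= ~(1u << nr);
			actions[nr]();
		}
}

static void timer_action(void) { puts("timer softirq ran"); }

int main(void)
{
	open_softirq(0, timer_action);
	raise_softirq(0);
	do_softirq();
	return 0;
}
```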
80330diff --git a/include/linux/iommu.h b/include/linux/iommu.h
80331index b96a5b2..2732d1c 100644
80332--- a/include/linux/iommu.h
80333+++ b/include/linux/iommu.h
80334@@ -131,7 +131,7 @@ struct iommu_ops {
80335 u32 (*domain_get_windows)(struct iommu_domain *domain);
80336
80337 unsigned long pgsize_bitmap;
80338-};
80339+} __do_const;
80340
80341 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
80342 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
80343diff --git a/include/linux/ioport.h b/include/linux/ioport.h
80344index 89b7c24..382af74 100644
80345--- a/include/linux/ioport.h
80346+++ b/include/linux/ioport.h
80347@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
80348 int adjust_resource(struct resource *res, resource_size_t start,
80349 resource_size_t size);
80350 resource_size_t resource_alignment(struct resource *res);
80351-static inline resource_size_t resource_size(const struct resource *res)
80352+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
80353 {
80354 return res->end - res->start + 1;
80355 }
80356diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
80357index 35e7eca..6afb7ad 100644
80358--- a/include/linux/ipc_namespace.h
80359+++ b/include/linux/ipc_namespace.h
80360@@ -69,7 +69,7 @@ struct ipc_namespace {
80361 struct user_namespace *user_ns;
80362
80363 unsigned int proc_inum;
80364-};
80365+} __randomize_layout;
80366
80367 extern struct ipc_namespace init_ipc_ns;
80368 extern atomic_t nr_ipc_ns;
80369diff --git a/include/linux/irq.h b/include/linux/irq.h
80370index ef1ac9f..e1db06c 100644
80371--- a/include/linux/irq.h
80372+++ b/include/linux/irq.h
80373@@ -338,7 +338,8 @@ struct irq_chip {
80374 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
80375
80376 unsigned long flags;
80377-};
80378+} __do_const;
80379+typedef struct irq_chip __no_const irq_chip_no_const;
80380
80381 /*
80382 * irq_chip specific flags
80383diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
80384index 0ceb389..eed3fb8 100644
80385--- a/include/linux/irqchip/arm-gic.h
80386+++ b/include/linux/irqchip/arm-gic.h
80387@@ -73,9 +73,11 @@
80388
80389 #ifndef __ASSEMBLY__
80390
80391+#include <linux/irq.h>
80392+
80393 struct device_node;
80394
80395-extern struct irq_chip gic_arch_extn;
80396+extern irq_chip_no_const gic_arch_extn;
80397
80398 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
80399 u32 offset, struct device_node *);
80400diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
80401index 1f44466..b481806 100644
80402--- a/include/linux/jiffies.h
80403+++ b/include/linux/jiffies.h
80404@@ -292,20 +292,20 @@ extern unsigned long preset_lpj;
80405 /*
80406 * Convert various time units to each other:
80407 */
80408-extern unsigned int jiffies_to_msecs(const unsigned long j);
80409-extern unsigned int jiffies_to_usecs(const unsigned long j);
80410+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
80411+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
80412
80413-static inline u64 jiffies_to_nsecs(const unsigned long j)
80414+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
80415 {
80416 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
80417 }
80418
80419-extern unsigned long msecs_to_jiffies(const unsigned int m);
80420-extern unsigned long usecs_to_jiffies(const unsigned int u);
80421+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
80422+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
80423 extern unsigned long timespec_to_jiffies(const struct timespec *value);
80424 extern void jiffies_to_timespec(const unsigned long jiffies,
80425- struct timespec *value);
80426-extern unsigned long timeval_to_jiffies(const struct timeval *value);
80427+ struct timespec *value) __intentional_overflow(-1);
80428+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
80429 extern void jiffies_to_timeval(const unsigned long jiffies,
80430 struct timeval *value);
80431
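Editor's note: the __intentional_overflow(-1) annotations tell PaX's size_overflow plugin that arithmetic in these time converters may wrap by design, so instrumented overflow traps are suppressed there. The conversions themselves are simple tick scaling; the sketch below assumes HZ=100 and simplifies the real kernel formulas:

```c
/* Simplified sketch of the jiffies<->msecs scaling the annotated
 * functions perform, assuming HZ=100 (10 ms per tick). Large inputs
 * visibly wrap, which is the "intentional overflow" being annotated. */
#include <stdio.h>

#define HZ 100u

static unsigned jiffies_to_msecs(unsigned long j)
{
	return (unsigned)(j * (1000u / HZ));           /* may wrap */
}

static unsigned long msecs_to_jiffies(unsigned m)
{
	return (m + (1000u / HZ) - 1) / (1000u / HZ);  /* round up */
}

int main(void)
{
	printf("250 ms = %lu jiffies\n", msecs_to_jiffies(250));  /* 25 */
	printf("25 jiffies = %u ms\n", jiffies_to_msecs(25));     /* 250 */
	printf("huge: %u ms\n", jiffies_to_msecs(0x1fffffffful)); /* wrapped */
	return 0;
}
```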
80432diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
80433index 6883e19..e854fcb 100644
80434--- a/include/linux/kallsyms.h
80435+++ b/include/linux/kallsyms.h
80436@@ -15,7 +15,8 @@
80437
80438 struct module;
80439
80440-#ifdef CONFIG_KALLSYMS
80441+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
80442+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
80443 /* Lookup the address for a symbol. Returns 0 if not found. */
80444 unsigned long kallsyms_lookup_name(const char *name);
80445
80446@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
80447 /* Stupid that this does nothing, but I didn't create this mess. */
80448 #define __print_symbol(fmt, addr)
80449 #endif /*CONFIG_KALLSYMS*/
80450+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
80451+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
80452+extern unsigned long kallsyms_lookup_name(const char *name);
80453+extern void __print_symbol(const char *fmt, unsigned long address);
80454+extern int sprint_backtrace(char *buffer, unsigned long address);
80455+extern int sprint_symbol(char *buffer, unsigned long address);
80456+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
80457+const char *kallsyms_lookup(unsigned long addr,
80458+ unsigned long *symbolsize,
80459+ unsigned long *offset,
80460+ char **modname, char *namebuf);
80461+extern int kallsyms_lookup_size_offset(unsigned long addr,
80462+ unsigned long *symbolsize,
80463+ unsigned long *offset);
80464+#endif
80465
80466 /* This macro allows us to keep printk typechecking */
80467 static __printf(1, 2)
80468diff --git a/include/linux/key-type.h b/include/linux/key-type.h
80469index a74c3a8..28d3f21 100644
80470--- a/include/linux/key-type.h
80471+++ b/include/linux/key-type.h
80472@@ -131,7 +131,7 @@ struct key_type {
80473 /* internal fields */
80474 struct list_head link; /* link in types list */
80475 struct lock_class_key lock_class; /* key->sem lock class */
80476-};
80477+} __do_const;
80478
80479 extern struct key_type key_type_keyring;
80480
80481diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
80482index 6b06d37..c134867 100644
80483--- a/include/linux/kgdb.h
80484+++ b/include/linux/kgdb.h
80485@@ -52,7 +52,7 @@ extern int kgdb_connected;
80486 extern int kgdb_io_module_registered;
80487
80488 extern atomic_t kgdb_setting_breakpoint;
80489-extern atomic_t kgdb_cpu_doing_single_step;
80490+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
80491
80492 extern struct task_struct *kgdb_usethread;
80493 extern struct task_struct *kgdb_contthread;
80494@@ -254,7 +254,7 @@ struct kgdb_arch {
80495 void (*correct_hw_break)(void);
80496
80497 void (*enable_nmi)(bool on);
80498-};
80499+} __do_const;
80500
80501 /**
80502 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
80503@@ -279,7 +279,7 @@ struct kgdb_io {
80504 void (*pre_exception) (void);
80505 void (*post_exception) (void);
80506 int is_console;
80507-};
80508+} __do_const;
80509
80510 extern struct kgdb_arch arch_kgdb_ops;
80511
80512diff --git a/include/linux/kmod.h b/include/linux/kmod.h
80513index 0555cc6..40116ce 100644
80514--- a/include/linux/kmod.h
80515+++ b/include/linux/kmod.h
80516@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
80517 * usually useless though. */
80518 extern __printf(2, 3)
80519 int __request_module(bool wait, const char *name, ...);
80520+extern __printf(3, 4)
80521+int ___request_module(bool wait, char *param_name, const char *name, ...);
80522 #define request_module(mod...) __request_module(true, mod)
80523 #define request_module_nowait(mod...) __request_module(false, mod)
80524 #define try_then_request_module(x, mod...) \
80525@@ -57,6 +59,9 @@ struct subprocess_info {
80526 struct work_struct work;
80527 struct completion *complete;
80528 char *path;
80529+#ifdef CONFIG_GRKERNSEC
80530+ char *origpath;
80531+#endif
80532 char **argv;
80533 char **envp;
80534 int wait;
80535diff --git a/include/linux/kobject.h b/include/linux/kobject.h
80536index 926afb6..58dd6e5 100644
80537--- a/include/linux/kobject.h
80538+++ b/include/linux/kobject.h
80539@@ -116,7 +116,7 @@ struct kobj_type {
80540 struct attribute **default_attrs;
80541 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
80542 const void *(*namespace)(struct kobject *kobj);
80543-};
80544+} __do_const;
80545
80546 struct kobj_uevent_env {
80547 char *envp[UEVENT_NUM_ENVP];
80548@@ -139,6 +139,7 @@ struct kobj_attribute {
80549 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
80550 const char *buf, size_t count);
80551 };
80552+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
80553
80554 extern const struct sysfs_ops kobj_sysfs_ops;
80555
80556@@ -166,7 +167,7 @@ struct kset {
80557 spinlock_t list_lock;
80558 struct kobject kobj;
80559 const struct kset_uevent_ops *uevent_ops;
80560-};
80561+} __randomize_layout;
80562
80563 extern void kset_init(struct kset *kset);
80564 extern int __must_check kset_register(struct kset *kset);
80565diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
80566index df32d25..fb52e27 100644
80567--- a/include/linux/kobject_ns.h
80568+++ b/include/linux/kobject_ns.h
80569@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
80570 const void *(*netlink_ns)(struct sock *sk);
80571 const void *(*initial_ns)(void);
80572 void (*drop_ns)(void *);
80573-};
80574+} __do_const;
80575
80576 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
80577 int kobj_ns_type_registered(enum kobj_ns_type type);
80578diff --git a/include/linux/kref.h b/include/linux/kref.h
80579index 484604d..0f6c5b6 100644
80580--- a/include/linux/kref.h
80581+++ b/include/linux/kref.h
80582@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
80583 static inline int kref_sub(struct kref *kref, unsigned int count,
80584 void (*release)(struct kref *kref))
80585 {
80586- WARN_ON(release == NULL);
80587+ BUG_ON(release == NULL);
80588
80589 if (atomic_sub_and_test((int) count, &kref->refcount)) {
80590 release(kref);
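Editor's note: kref_sub() drops count references and invokes the release callback exactly when the count reaches zero; the hunk hardens the NULL-release check from a recoverable WARN_ON to a hard BUG_ON. A userspace C11 analog of that drop-and-release contract:

```c
/* Userspace analog of kref_sub(): drop references atomically and call
 * the release function exactly once, when the count reaches zero. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_store(&k->refcount, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }

static int kref_sub(struct kref *k, int count, void (*release)(struct kref *))
{
	assert(release != NULL); /* the hunk's BUG_ON, in userspace */
	/* fetch_sub returns the old value; old == count means we hit 0 */
	if (atomic_fetch_sub(&k->refcount, count) == count) {
		release(k);
		return 1;
	}
	return 0;
}

struct obj { struct kref ref; };

static void obj_release(struct kref *k)
{
	puts("last reference dropped, freeing");
	/* container_of() in the kernel; here kref is the first member */
	free((struct obj *)k);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	kref_init(&o->ref);
	kref_get(&o->ref);
	kref_sub(&o->ref, 1, obj_release); /* count 2 -> 1 */
	kref_sub(&o->ref, 1, obj_release); /* count 1 -> 0, releases */
	return 0;
}
```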
80591diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
80592index b8e9a43..632678d 100644
80593--- a/include/linux/kvm_host.h
80594+++ b/include/linux/kvm_host.h
80595@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
80596 {
80597 }
80598 #endif
80599-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80600+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80601 struct module *module);
80602 void kvm_exit(void);
80603
80604@@ -621,7 +621,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
80605 struct kvm_guest_debug *dbg);
80606 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
80607
80608-int kvm_arch_init(void *opaque);
80609+int kvm_arch_init(const void *opaque);
80610 void kvm_arch_exit(void);
80611
80612 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
80613diff --git a/include/linux/libata.h b/include/linux/libata.h
80614index 3fee55e..42565b7 100644
80615--- a/include/linux/libata.h
80616+++ b/include/linux/libata.h
80617@@ -976,7 +976,7 @@ struct ata_port_operations {
80618 * fields must be pointers.
80619 */
80620 const struct ata_port_operations *inherits;
80621-};
80622+} __do_const;
80623
80624 struct ata_port_info {
80625 unsigned long flags;
80626diff --git a/include/linux/linkage.h b/include/linux/linkage.h
80627index a6a42dd..6c5ebce 100644
80628--- a/include/linux/linkage.h
80629+++ b/include/linux/linkage.h
80630@@ -36,6 +36,7 @@
80631 #endif
80632
80633 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
80634+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
80635 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
80636
80637 /*
80638diff --git a/include/linux/list.h b/include/linux/list.h
80639index ef95941..82db65a 100644
80640--- a/include/linux/list.h
80641+++ b/include/linux/list.h
80642@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
80643 extern void list_del(struct list_head *entry);
80644 #endif
80645
80646+extern void __pax_list_add(struct list_head *new,
80647+ struct list_head *prev,
80648+ struct list_head *next);
80649+static inline void pax_list_add(struct list_head *new, struct list_head *head)
80650+{
80651+ __pax_list_add(new, head, head->next);
80652+}
80653+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
80654+{
80655+ __pax_list_add(new, head->prev, head);
80656+}
80657+extern void pax_list_del(struct list_head *entry);
80658+
80659 /**
80660 * list_replace - replace old entry by new one
80661 * @old : the element to be replaced
80662@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
80663 INIT_LIST_HEAD(entry);
80664 }
80665
80666+extern void pax_list_del_init(struct list_head *entry);
80667+
80668 /**
80669 * list_move - delete from one list and add as another's head
80670 * @list: the entry to move
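Editor's note: pax_list_add()/pax_list_add_tail() mirror the ordinary list_add() helpers but route through an out-of-line __pax_list_add(), presumably so write-protected lists can be safely updated around the pointer surgery. The surgery itself is the classic circular doubly-linked insert; a minimal standalone version:

```c
/* Minimal circular doubly-linked list insert: the same pointer surgery
 * __pax_list_add() performs, minus any write-protection handling. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);       /* insert at head */
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);       /* insert at tail */
}

int main(void)
{
	struct list_head head = { &head, &head }; /* empty: self-linked */
	struct list_head a, b;

	list_add(&a, &head);
	list_add_tail(&b, &head);
	printf("head -> %s -> %s -> head\n",
	       head.next == &a ? "a" : "b",
	       head.next->next == &b ? "b" : "a");
	return 0;
}
```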
80671diff --git a/include/linux/math64.h b/include/linux/math64.h
80672index c45c089..298841c 100644
80673--- a/include/linux/math64.h
80674+++ b/include/linux/math64.h
80675@@ -15,7 +15,7 @@
80676 * This is commonly provided by 32bit archs to provide an optimized 64bit
80677 * divide.
80678 */
80679-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
80680+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
80681 {
80682 *remainder = dividend % divisor;
80683 return dividend / divisor;
80684@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
80685 /**
80686 * div64_u64 - unsigned 64bit divide with 64bit divisor
80687 */
80688-static inline u64 div64_u64(u64 dividend, u64 divisor)
80689+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80690 {
80691 return dividend / divisor;
80692 }
80693@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
80694 #define div64_ul(x, y) div_u64((x), (y))
80695
80696 #ifndef div_u64_rem
80697-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
80698+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
80699 {
80700 *remainder = do_div(dividend, divisor);
80701 return dividend;
80702@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
80703 #endif
80704
80705 #ifndef div64_u64
80706-extern u64 div64_u64(u64 dividend, u64 divisor);
80707+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
80708 #endif
80709
80710 #ifndef div64_s64
80711@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
80712 * divide.
80713 */
80714 #ifndef div_u64
80715-static inline u64 div_u64(u64 dividend, u32 divisor)
80716+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
80717 {
80718 u32 remainder;
80719 return div_u64_rem(dividend, divisor, &remainder);
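Editor's note: div_u64_rem() returns the quotient and stores the remainder through a pointer, letting 32-bit architectures implement one combined 64/32 division; the __intentional_overflow(-1) annotation again just exempts it from overflow instrumentation. A standalone version with the same contract:

```c
/* Standalone version of the div_u64_rem() contract: 64-bit dividend,
 * 32-bit divisor, quotient returned, remainder stored via pointer. */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
			    uint32_t *remainder)
{
	*remainder = (uint32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	uint32_t rem;
	uint64_t q = div_u64_rem(1000000007ULL, 3, &rem);

	/* q=333333335 rem=2 */
	printf("q=%llu rem=%u\n", (unsigned long long)q, rem);
	return 0;
}
```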
80720diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
80721index 5f1ea75..5125ac5 100644
80722--- a/include/linux/mempolicy.h
80723+++ b/include/linux/mempolicy.h
80724@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
80725 }
80726
80727 #define vma_policy(vma) ((vma)->vm_policy)
80728+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
80729+{
80730+ vma->vm_policy = pol;
80731+}
80732
80733 static inline void mpol_get(struct mempolicy *pol)
80734 {
80735@@ -223,6 +227,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
80736 }
80737
80738 #define vma_policy(vma) NULL
80739+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
80740+{
80741+}
80742
80743 static inline int
80744 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
80745diff --git a/include/linux/mm.h b/include/linux/mm.h
80746index c1b7414..5ea2ad8 100644
80747--- a/include/linux/mm.h
80748+++ b/include/linux/mm.h
80749@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
80750 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
80751 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
80752 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
80753+
80754+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80755+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
80756+#endif
80757+
80758 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
80759
80760 #ifdef CONFIG_MEM_SOFT_DIRTY
80761@@ -229,8 +234,8 @@ struct vm_operations_struct {
80762 /* called by access_process_vm when get_user_pages() fails, typically
80763 * for use by special VMAs that can switch between memory and hardware
80764 */
80765- int (*access)(struct vm_area_struct *vma, unsigned long addr,
80766- void *buf, int len, int write);
80767+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
80768+ void *buf, size_t len, int write);
80769 #ifdef CONFIG_NUMA
80770 /*
80771 * set_policy() op must add a reference to any non-NULL @new mempolicy
80772@@ -260,6 +265,7 @@ struct vm_operations_struct {
80773 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
80774 unsigned long size, pgoff_t pgoff);
80775 };
80776+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
80777
80778 struct mmu_gather;
80779 struct inode;
80780@@ -1112,8 +1118,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
80781 unsigned long *pfn);
80782 int follow_phys(struct vm_area_struct *vma, unsigned long address,
80783 unsigned int flags, unsigned long *prot, resource_size_t *phys);
80784-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
80785- void *buf, int len, int write);
80786+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
80787+ void *buf, size_t len, int write);
80788
80789 static inline void unmap_shared_mapping_range(struct address_space *mapping,
80790 loff_t const holebegin, loff_t const holelen)
80791@@ -1152,9 +1158,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
80792 }
80793 #endif
80794
80795-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
80796-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
80797- void *buf, int len, int write);
80798+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
80799+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
80800+ void *buf, size_t len, int write);
80801
80802 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
80803 unsigned long start, unsigned long nr_pages,
80804@@ -1186,34 +1192,6 @@ int set_page_dirty(struct page *page);
80805 int set_page_dirty_lock(struct page *page);
80806 int clear_page_dirty_for_io(struct page *page);
80807
80808-/* Is the vma a continuation of the stack vma above it? */
80809-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
80810-{
80811- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
80812-}
80813-
80814-static inline int stack_guard_page_start(struct vm_area_struct *vma,
80815- unsigned long addr)
80816-{
80817- return (vma->vm_flags & VM_GROWSDOWN) &&
80818- (vma->vm_start == addr) &&
80819- !vma_growsdown(vma->vm_prev, addr);
80820-}
80821-
80822-/* Is the vma a continuation of the stack vma below it? */
80823-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
80824-{
80825- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
80826-}
80827-
80828-static inline int stack_guard_page_end(struct vm_area_struct *vma,
80829- unsigned long addr)
80830-{
80831- return (vma->vm_flags & VM_GROWSUP) &&
80832- (vma->vm_end == addr) &&
80833- !vma_growsup(vma->vm_next, addr);
80834-}
80835-
80836 extern pid_t
80837 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
80838
80839@@ -1313,6 +1291,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
80840 }
80841 #endif
80842
80843+#ifdef CONFIG_MMU
80844+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
80845+#else
80846+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
80847+{
80848+ return __pgprot(0);
80849+}
80850+#endif
80851+
80852 int vma_wants_writenotify(struct vm_area_struct *vma);
80853
80854 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
80855@@ -1331,8 +1318,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
80856 {
80857 return 0;
80858 }
80859+
80860+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
80861+ unsigned long address)
80862+{
80863+ return 0;
80864+}
80865 #else
80866 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
80867+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
80868 #endif
80869
80870 #ifdef __PAGETABLE_PMD_FOLDED
80871@@ -1341,8 +1335,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
80872 {
80873 return 0;
80874 }
80875+
80876+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
80877+ unsigned long address)
80878+{
80879+ return 0;
80880+}
80881 #else
80882 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
80883+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
80884 #endif
80885
80886 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
80887@@ -1360,11 +1361,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
80888 NULL: pud_offset(pgd, address);
80889 }
80890
80891+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
80892+{
80893+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
80894+ NULL: pud_offset(pgd, address);
80895+}
80896+
80897 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
80898 {
80899 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
80900 NULL: pmd_offset(pud, address);
80901 }
80902+
80903+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
80904+{
80905+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
80906+ NULL: pmd_offset(pud, address);
80907+}
80908 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
80909
80910 #if USE_SPLIT_PTE_PTLOCKS
80911@@ -1754,7 +1767,7 @@ extern int install_special_mapping(struct mm_struct *mm,
80912 unsigned long addr, unsigned long len,
80913 unsigned long flags, struct page **pages);
80914
80915-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
80916+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
80917
80918 extern unsigned long mmap_region(struct file *file, unsigned long addr,
80919 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
80920@@ -1762,6 +1775,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80921 unsigned long len, unsigned long prot, unsigned long flags,
80922 unsigned long pgoff, unsigned long *populate);
80923 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
80924+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
80925
80926 #ifdef CONFIG_MMU
80927 extern int __mm_populate(unsigned long addr, unsigned long len,
80928@@ -1790,10 +1804,11 @@ struct vm_unmapped_area_info {
80929 unsigned long high_limit;
80930 unsigned long align_mask;
80931 unsigned long align_offset;
80932+ unsigned long threadstack_offset;
80933 };
80934
80935-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
80936-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
80937+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
80938+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
80939
80940 /*
80941 * Search for an unmapped address range.
80942@@ -1805,7 +1820,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
80943 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
80944 */
80945 static inline unsigned long
80946-vm_unmapped_area(struct vm_unmapped_area_info *info)
80947+vm_unmapped_area(const struct vm_unmapped_area_info *info)
80948 {
80949 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
80950 return unmapped_area(info);
80951@@ -1868,6 +1883,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
80952 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
80953 struct vm_area_struct **pprev);
80954
80955+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
80956+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
80957+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
80958+
80959 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
80960 NULL if none. Assume start_addr < end_addr. */
80961 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
80962@@ -1896,15 +1915,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
80963 return vma;
80964 }
80965
80966-#ifdef CONFIG_MMU
80967-pgprot_t vm_get_page_prot(unsigned long vm_flags);
80968-#else
80969-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
80970-{
80971- return __pgprot(0);
80972-}
80973-#endif
80974-
80975 #ifdef CONFIG_NUMA_BALANCING
80976 unsigned long change_prot_numa(struct vm_area_struct *vma,
80977 unsigned long start, unsigned long end);
80978@@ -1956,6 +1966,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
80979 static inline void vm_stat_account(struct mm_struct *mm,
80980 unsigned long flags, struct file *file, long pages)
80981 {
80982+
80983+#ifdef CONFIG_PAX_RANDMMAP
80984+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
80985+#endif
80986+
80987 mm->total_vm += pages;
80988 }
80989 #endif /* CONFIG_PROC_FS */
80990@@ -2037,7 +2052,7 @@ extern int unpoison_memory(unsigned long pfn);
80991 extern int sysctl_memory_failure_early_kill;
80992 extern int sysctl_memory_failure_recovery;
80993 extern void shake_page(struct page *p, int access);
80994-extern atomic_long_t num_poisoned_pages;
80995+extern atomic_long_unchecked_t num_poisoned_pages;
80996 extern int soft_offline_page(struct page *page, int flags);
80997
80998 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
80999@@ -2072,5 +2087,11 @@ void __init setup_nr_node_ids(void);
81000 static inline void setup_nr_node_ids(void) {}
81001 #endif
81002
81003+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
81004+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
81005+#else
81006+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
81007+#endif
81008+
81009 #endif /* __KERNEL__ */
81010 #endif /* _LINUX_MM_H */
81011diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
81012index 290901a..e99b01c 100644
81013--- a/include/linux/mm_types.h
81014+++ b/include/linux/mm_types.h
81015@@ -307,7 +307,9 @@ struct vm_area_struct {
81016 #ifdef CONFIG_NUMA
81017 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
81018 #endif
81019-};
81020+
81021+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
81022+} __randomize_layout;
81023
81024 struct core_thread {
81025 struct task_struct *task;
81026@@ -453,7 +455,25 @@ struct mm_struct {
81027 bool tlb_flush_pending;
81028 #endif
81029 struct uprobes_state uprobes_state;
81030-};
81031+
81032+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
81033+ unsigned long pax_flags;
81034+#endif
81035+
81036+#ifdef CONFIG_PAX_DLRESOLVE
81037+ unsigned long call_dl_resolve;
81038+#endif
81039+
81040+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
81041+ unsigned long call_syscall;
81042+#endif
81043+
81044+#ifdef CONFIG_PAX_ASLR
81045+ unsigned long delta_mmap; /* randomized offset */
81046+ unsigned long delta_stack; /* randomized offset */
81047+#endif
81048+
81049+} __randomize_layout;
81050
81051 static inline void mm_init_cpumask(struct mm_struct *mm)
81052 {
81053diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
81054index c5d5278..f0b68c8 100644
81055--- a/include/linux/mmiotrace.h
81056+++ b/include/linux/mmiotrace.h
81057@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
81058 /* Called from ioremap.c */
81059 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
81060 void __iomem *addr);
81061-extern void mmiotrace_iounmap(volatile void __iomem *addr);
81062+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
81063
81064 /* For anyone to insert markers. Remember trailing newline. */
81065 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
81066@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
81067 {
81068 }
81069
81070-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
81071+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
81072 {
81073 }
81074
81075diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
81076index 9b61b9b..52147d6b 100644
81077--- a/include/linux/mmzone.h
81078+++ b/include/linux/mmzone.h
81079@@ -396,7 +396,7 @@ struct zone {
81080 unsigned long flags; /* zone flags, see below */
81081
81082 /* Zone statistics */
81083- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81084+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81085
81086 /*
81087 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
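
The vm_stat conversion above is the recurring atomic_long_unchecked_t pattern: under PaX REFCOUNT, ordinary atomic types trap when an increment would overflow (catching reference-count overflows before they become use-after-free bugs), while the *_unchecked_t variants opt pure statistics counters, which may legitimately wrap, back into plain wraparound behavior. The distinction, sketched in portable C with a GCC builtin in place of the real trap-on-overflow asm and with atomicity omitted for brevity:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { long counter; } atomic_long_demo;
    /* unsigned here only to keep the host demo free of signed-overflow UB;
     * the kernel builds with wrapping semantics enabled. */
    typedef struct { unsigned long counter; } atomic_long_unchecked_demo;

    /* Checked add: detect overflow, as PaX REFCOUNT does in asm. */
    static void add_checked(long i, atomic_long_demo *v)
    {
        long result;
        if (__builtin_add_overflow(v->counter, i, &result)) {
            fprintf(stderr, "refcount overflow caught\n");
            return; /* the real code raises a trap and saturates the count */
        }
        v->counter = result;
    }

    /* Unchecked add: ordinary wraparound, fine for statistics. */
    static void add_unchecked(unsigned long i, atomic_long_unchecked_demo *v)
    {
        v->counter += i;
    }

    int main(void)
    {
        atomic_long_demo c = { LONG_MAX };
        atomic_long_unchecked_demo u = { ULONG_MAX };
        add_checked(1, &c);    /* caught */
        add_unchecked(1, &u);  /* wraps silently, by design */
        printf("unchecked counter wrapped to %lu\n", u.counter);
        return 0;
    }
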
81088diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
81089index 45e9214..a7227d6 100644
81090--- a/include/linux/mod_devicetable.h
81091+++ b/include/linux/mod_devicetable.h
81092@@ -13,7 +13,7 @@
81093 typedef unsigned long kernel_ulong_t;
81094 #endif
81095
81096-#define PCI_ANY_ID (~0)
81097+#define PCI_ANY_ID ((__u16)~0)
81098
81099 struct pci_device_id {
81100 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
81101@@ -139,7 +139,7 @@ struct usb_device_id {
81102 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
81103 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
81104
81105-#define HID_ANY_ID (~0)
81106+#define HID_ANY_ID (~0U)
81107 #define HID_BUS_ANY 0xffff
81108 #define HID_GROUP_ANY 0x0000
81109
81110@@ -467,7 +467,7 @@ struct dmi_system_id {
81111 const char *ident;
81112 struct dmi_strmatch matches[4];
81113 void *driver_data;
81114-};
81115+} __do_const;
81116 /*
81117 * struct dmi_device_id appears during expansion of
81118 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
81119diff --git a/include/linux/module.h b/include/linux/module.h
81120index eaf60ff..641979a 100644
81121--- a/include/linux/module.h
81122+++ b/include/linux/module.h
81123@@ -17,9 +17,11 @@
81124 #include <linux/moduleparam.h>
81125 #include <linux/tracepoint.h>
81126 #include <linux/export.h>
81127+#include <linux/fs.h>
81128
81129 #include <linux/percpu.h>
81130 #include <asm/module.h>
81131+#include <asm/pgtable.h>
81132
81133 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
81134 #define MODULE_SIG_STRING "~Module signature appended~\n"
81135@@ -42,7 +44,7 @@ struct module_kobject {
81136 struct kobject *drivers_dir;
81137 struct module_param_attrs *mp;
81138 struct completion *kobj_completion;
81139-};
81140+} __randomize_layout;
81141
81142 struct module_attribute {
81143 struct attribute attr;
81144@@ -54,12 +56,13 @@ struct module_attribute {
81145 int (*test)(struct module *);
81146 void (*free)(struct module *);
81147 };
81148+typedef struct module_attribute __no_const module_attribute_no_const;
81149
81150 struct module_version_attribute {
81151 struct module_attribute mattr;
81152 const char *module_name;
81153 const char *version;
81154-} __attribute__ ((__aligned__(sizeof(void *))));
81155+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
81156
81157 extern ssize_t __modver_version_show(struct module_attribute *,
81158 struct module_kobject *, char *);
81159@@ -238,7 +241,7 @@ struct module {
81160
81161 /* Sysfs stuff. */
81162 struct module_kobject mkobj;
81163- struct module_attribute *modinfo_attrs;
81164+ module_attribute_no_const *modinfo_attrs;
81165 const char *version;
81166 const char *srcversion;
81167 struct kobject *holders_dir;
81168@@ -287,19 +290,16 @@ struct module {
81169 int (*init)(void);
81170
81171 /* If this is non-NULL, vfree after init() returns */
81172- void *module_init;
81173+ void *module_init_rx, *module_init_rw;
81174
81175 /* Here is the actual code + data, vfree'd on unload. */
81176- void *module_core;
81177+ void *module_core_rx, *module_core_rw;
81178
81179 /* Here are the sizes of the init and core sections */
81180- unsigned int init_size, core_size;
81181+ unsigned int init_size_rw, core_size_rw;
81182
81183 /* The size of the executable code in each section. */
81184- unsigned int init_text_size, core_text_size;
81185-
81186- /* Size of RO sections of the module (text+rodata) */
81187- unsigned int init_ro_size, core_ro_size;
81188+ unsigned int init_size_rx, core_size_rx;
81189
81190 /* Arch-specific module values */
81191 struct mod_arch_specific arch;
81192@@ -355,6 +355,10 @@ struct module {
81193 #ifdef CONFIG_EVENT_TRACING
81194 struct ftrace_event_call **trace_events;
81195 unsigned int num_trace_events;
81196+ struct file_operations trace_id;
81197+ struct file_operations trace_enable;
81198+ struct file_operations trace_format;
81199+ struct file_operations trace_filter;
81200 #endif
81201 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
81202 unsigned int num_ftrace_callsites;
81203@@ -378,7 +382,7 @@ struct module {
81204 ctor_fn_t *ctors;
81205 unsigned int num_ctors;
81206 #endif
81207-};
81208+} __randomize_layout;
81209 #ifndef MODULE_ARCH_INIT
81210 #define MODULE_ARCH_INIT {}
81211 #endif
81212@@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr);
81213 bool is_module_percpu_address(unsigned long addr);
81214 bool is_module_text_address(unsigned long addr);
81215
81216+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
81217+{
81218+
81219+#ifdef CONFIG_PAX_KERNEXEC
81220+ if (ktla_ktva(addr) >= (unsigned long)start &&
81221+ ktla_ktva(addr) < (unsigned long)start + size)
81222+ return 1;
81223+#endif
81224+
81225+ return ((void *)addr >= start && (void *)addr < start + size);
81226+}
81227+
81228+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
81229+{
81230+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
81231+}
81232+
81233+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
81234+{
81235+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
81236+}
81237+
81238+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
81239+{
81240+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
81241+}
81242+
81243+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
81244+{
81245+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
81246+}
81247+
81248 static inline int within_module_core(unsigned long addr, const struct module *mod)
81249 {
81250- return (unsigned long)mod->module_core <= addr &&
81251- addr < (unsigned long)mod->module_core + mod->core_size;
81252+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
81253 }
81254
81255 static inline int within_module_init(unsigned long addr, const struct module *mod)
81256 {
81257- return (unsigned long)mod->module_init <= addr &&
81258- addr < (unsigned long)mod->module_init + mod->init_size;
81259+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
81260 }
81261
81262 /* Search for module by name: must hold module_mutex. */
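
Under CONFIG_PAX_KERNEXEC each module is split into two mappings, executable-but-read-only (*_rx) and writable-but-non-executable (*_rw), which is why the single module_core/core_size pair above becomes two pairs and why the containment helpers must test both regions (plus the ktla_ktva() alias translation on i386). The containment logic, reduced to a self-contained sketch with invented buffer sizes standing in for the two mappings:

    #include <stdbool.h>
    #include <stdio.h>

    struct mod_layout {
        void *core_rx; unsigned long size_rx;   /* code: executable, RO */
        void *core_rw; unsigned long size_rw;   /* data: writable, NX  */
    };

    static bool within(unsigned long addr, void *start, unsigned long size)
    {
        char *p = (char *)addr, *s = start;
        return p >= s && p < s + size;
    }

    /* Mirrors the new within_module_core(): membership in either region. */
    static bool within_module_core_demo(unsigned long addr,
                                        const struct mod_layout *m)
    {
        return within(addr, m->core_rx, m->size_rx) ||
               within(addr, m->core_rw, m->size_rw);
    }

    int main(void)
    {
        static char rx[64], rw[64];
        struct mod_layout m = { rx, sizeof rx, rw, sizeof rw };
        printf("%d %d\n",
               within_module_core_demo((unsigned long)(rx + 8), &m),
               within_module_core_demo((unsigned long)(rw + 8), &m));
        return 0;
    }
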
81263diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
81264index 560ca53..ef621ef 100644
81265--- a/include/linux/moduleloader.h
81266+++ b/include/linux/moduleloader.h
81267@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
81268 sections. Returns NULL on failure. */
81269 void *module_alloc(unsigned long size);
81270
81271+#ifdef CONFIG_PAX_KERNEXEC
81272+void *module_alloc_exec(unsigned long size);
81273+#else
81274+#define module_alloc_exec(x) module_alloc(x)
81275+#endif
81276+
81277 /* Free memory returned from module_alloc. */
81278 void module_free(struct module *mod, void *module_region);
81279
81280+#ifdef CONFIG_PAX_KERNEXEC
81281+void module_free_exec(struct module *mod, void *module_region);
81282+#else
81283+#define module_free_exec(x, y) module_free((x), (y))
81284+#endif
81285+
81286 /*
81287 * Apply the given relocation to the (simplified) ELF. Return -error
81288 * or 0.
81289@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
81290 unsigned int relsec,
81291 struct module *me)
81292 {
81293+#ifdef CONFIG_MODULES
81294 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
81295+#endif
81296 return -ENOEXEC;
81297 }
81298 #endif
81299@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
81300 unsigned int relsec,
81301 struct module *me)
81302 {
81303+#ifdef CONFIG_MODULES
81304 	printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
81305+#endif
81306 return -ENOEXEC;
81307 }
81308 #endif
81309diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
81310index c3eb102..073c4a6 100644
81311--- a/include/linux/moduleparam.h
81312+++ b/include/linux/moduleparam.h
81313@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
81314 * @len is usually just sizeof(string).
81315 */
81316 #define module_param_string(name, string, len, perm) \
81317- static const struct kparam_string __param_string_##name \
81318+ static const struct kparam_string __param_string_##name __used \
81319 = { len, string }; \
81320 __module_param_call(MODULE_PARAM_PREFIX, name, \
81321 &param_ops_string, \
81322@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
81323 */
81324 #define module_param_array_named(name, array, type, nump, perm) \
81325 param_check_##type(name, &(array)[0]); \
81326- static const struct kparam_array __param_arr_##name \
81327+ static const struct kparam_array __param_arr_##name __used \
81328 = { .max = ARRAY_SIZE(array), .num = nump, \
81329 .ops = &param_ops_##type, \
81330 .elemsize = sizeof(array[0]), .elem = array }; \
81331diff --git a/include/linux/mount.h b/include/linux/mount.h
81332index 839bac2..a96b37c 100644
81333--- a/include/linux/mount.h
81334+++ b/include/linux/mount.h
81335@@ -59,7 +59,7 @@ struct vfsmount {
81336 struct dentry *mnt_root; /* root of the mounted tree */
81337 struct super_block *mnt_sb; /* pointer to superblock */
81338 int mnt_flags;
81339-};
81340+} __randomize_layout;
81341
81342 struct file; /* forward dec */
81343
81344diff --git a/include/linux/namei.h b/include/linux/namei.h
81345index 492de72..1bddcd4 100644
81346--- a/include/linux/namei.h
81347+++ b/include/linux/namei.h
81348@@ -19,7 +19,7 @@ struct nameidata {
81349 unsigned seq, m_seq;
81350 int last_type;
81351 unsigned depth;
81352- char *saved_names[MAX_NESTED_LINKS + 1];
81353+ const char *saved_names[MAX_NESTED_LINKS + 1];
81354 };
81355
81356 /*
81357@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
81358
81359 extern void nd_jump_link(struct nameidata *nd, struct path *path);
81360
81361-static inline void nd_set_link(struct nameidata *nd, char *path)
81362+static inline void nd_set_link(struct nameidata *nd, const char *path)
81363 {
81364 nd->saved_names[nd->depth] = path;
81365 }
81366
81367-static inline char *nd_get_link(struct nameidata *nd)
81368+static inline const char *nd_get_link(const struct nameidata *nd)
81369 {
81370 return nd->saved_names[nd->depth];
81371 }
81372diff --git a/include/linux/net.h b/include/linux/net.h
81373index 17d8339..81656c0 100644
81374--- a/include/linux/net.h
81375+++ b/include/linux/net.h
81376@@ -192,7 +192,7 @@ struct net_proto_family {
81377 int (*create)(struct net *net, struct socket *sock,
81378 int protocol, int kern);
81379 struct module *owner;
81380-};
81381+} __do_const;
81382
81383 struct iovec;
81384 struct kvec;
81385diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
81386index 911718f..f673407 100644
81387--- a/include/linux/netdevice.h
81388+++ b/include/linux/netdevice.h
81389@@ -1147,6 +1147,7 @@ struct net_device_ops {
81390 void *priv);
81391 int (*ndo_get_lock_subclass)(struct net_device *dev);
81392 };
81393+typedef struct net_device_ops __no_const net_device_ops_no_const;
81394
81395 /*
81396 * The DEVICE structure.
81397@@ -1229,7 +1230,7 @@ struct net_device {
81398 int iflink;
81399
81400 struct net_device_stats stats;
81401- atomic_long_t rx_dropped; /* dropped packets by core network
81402+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
81403 * Do not use this in drivers.
81404 */
81405
81406diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
81407index 2077489..a15e561 100644
81408--- a/include/linux/netfilter.h
81409+++ b/include/linux/netfilter.h
81410@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
81411 #endif
81412 /* Use the module struct to lock set/get code in place */
81413 struct module *owner;
81414-};
81415+} __do_const;
81416
81417 /* Function to register/unregister hook points. */
81418 int nf_register_hook(struct nf_hook_ops *reg);
81419diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
81420index 28c7436..2d6156a 100644
81421--- a/include/linux/netfilter/nfnetlink.h
81422+++ b/include/linux/netfilter/nfnetlink.h
81423@@ -19,7 +19,7 @@ struct nfnl_callback {
81424 const struct nlattr * const cda[]);
81425 const struct nla_policy *policy; /* netlink attribute policy */
81426 const u_int16_t attr_count; /* number of nlattr's */
81427-};
81428+} __do_const;
81429
81430 struct nfnetlink_subsystem {
81431 const char *name;
81432diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
81433new file mode 100644
81434index 0000000..33f4af8
81435--- /dev/null
81436+++ b/include/linux/netfilter/xt_gradm.h
81437@@ -0,0 +1,9 @@
81438+#ifndef _LINUX_NETFILTER_XT_GRADM_H
81439+#define _LINUX_NETFILTER_XT_GRADM_H 1
81440+
81441+struct xt_gradm_mtinfo {
81442+ __u16 flags;
81443+ __u16 invflags;
81444+};
81445+
81446+#endif
81447diff --git a/include/linux/netlink.h b/include/linux/netlink.h
81448index aad8eea..034cda7 100644
81449--- a/include/linux/netlink.h
81450+++ b/include/linux/netlink.h
81451@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
81452 }
81453
81454 enum netlink_skb_flags {
81455- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
81456- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
81457- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
81458+ NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
81459+ NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
81460+ NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
81461+ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
81462 };
81463
81464 struct netlink_skb_parms {
81465@@ -169,4 +170,11 @@ struct netlink_tap {
81466 extern int netlink_add_tap(struct netlink_tap *nt);
81467 extern int netlink_remove_tap(struct netlink_tap *nt);
81468
81469+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
81470+ struct user_namespace *ns, int cap);
81471+bool netlink_ns_capable(const struct sk_buff *skb,
81472+ struct user_namespace *ns, int cap);
81473+bool netlink_capable(const struct sk_buff *skb, int cap);
81474+bool netlink_net_capable(const struct sk_buff *skb, int cap);
81475+
81476 #endif /* __LINUX_NETLINK_H */
81477diff --git a/include/linux/nls.h b/include/linux/nls.h
81478index 520681b..1d67ed2 100644
81479--- a/include/linux/nls.h
81480+++ b/include/linux/nls.h
81481@@ -31,7 +31,7 @@ struct nls_table {
81482 const unsigned char *charset2upper;
81483 struct module *owner;
81484 struct nls_table *next;
81485-};
81486+} __do_const;
81487
81488 /* this value hold the maximum octet of charset */
81489 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
81490diff --git a/include/linux/notifier.h b/include/linux/notifier.h
81491index d14a4c3..a078786 100644
81492--- a/include/linux/notifier.h
81493+++ b/include/linux/notifier.h
81494@@ -54,7 +54,8 @@ struct notifier_block {
81495 notifier_fn_t notifier_call;
81496 struct notifier_block __rcu *next;
81497 int priority;
81498-};
81499+} __do_const;
81500+typedef struct notifier_block __no_const notifier_block_no_const;
81501
81502 struct atomic_notifier_head {
81503 spinlock_t lock;
81504diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
81505index b2a0f15..4d7da32 100644
81506--- a/include/linux/oprofile.h
81507+++ b/include/linux/oprofile.h
81508@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
81509 int oprofilefs_create_ro_ulong(struct dentry * root,
81510 char const * name, ulong * val);
81511
81512-/** Create a file for read-only access to an atomic_t. */
81513+/** Create a file for read-only access to an atomic_unchecked_t. */
81514 int oprofilefs_create_ro_atomic(struct dentry * root,
81515- char const * name, atomic_t * val);
81516+ char const * name, atomic_unchecked_t * val);
81517
81518 /** create a directory */
81519 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
81520diff --git a/include/linux/padata.h b/include/linux/padata.h
81521index 4386946..f50c615 100644
81522--- a/include/linux/padata.h
81523+++ b/include/linux/padata.h
81524@@ -129,7 +129,7 @@ struct parallel_data {
81525 struct padata_serial_queue __percpu *squeue;
81526 atomic_t reorder_objects;
81527 atomic_t refcnt;
81528- atomic_t seq_nr;
81529+ atomic_unchecked_t seq_nr;
81530 struct padata_cpumask cpumask;
81531 spinlock_t lock ____cacheline_aligned;
81532 unsigned int processed;
81533diff --git a/include/linux/path.h b/include/linux/path.h
81534index d137218..be0c176 100644
81535--- a/include/linux/path.h
81536+++ b/include/linux/path.h
81537@@ -1,13 +1,15 @@
81538 #ifndef _LINUX_PATH_H
81539 #define _LINUX_PATH_H
81540
81541+#include <linux/compiler.h>
81542+
81543 struct dentry;
81544 struct vfsmount;
81545
81546 struct path {
81547 struct vfsmount *mnt;
81548 struct dentry *dentry;
81549-};
81550+} __randomize_layout;
81551
81552 extern void path_get(const struct path *);
81553 extern void path_put(const struct path *);
81554diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
81555index 5f2e559..7d59314 100644
81556--- a/include/linux/pci_hotplug.h
81557+++ b/include/linux/pci_hotplug.h
81558@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
81559 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
81560 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
81561 int (*reset_slot) (struct hotplug_slot *slot, int probe);
81562-};
81563+} __do_const;
81564+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
81565
81566 /**
81567 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
81568diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
81569index 95961f0..0afb48f 100644
81570--- a/include/linux/percpu-refcount.h
81571+++ b/include/linux/percpu-refcount.h
81572@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
81573 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
81574
81575 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
81576- __this_cpu_inc(*pcpu_count);
81577+ this_cpu_inc(*pcpu_count);
81578 else
81579 atomic_inc(&ref->count);
81580
81581@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
81582 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
81583
81584 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
81585- __this_cpu_inc(*pcpu_count);
81586+ this_cpu_inc(*pcpu_count);
81587 ret = true;
81588 }
81589
81590@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
81591 pcpu_count = ACCESS_ONCE(ref->pcpu_count);
81592
81593 if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
81594- __this_cpu_dec(*pcpu_count);
81595+ this_cpu_dec(*pcpu_count);
81596 else if (unlikely(atomic_dec_and_test(&ref->count)))
81597 ref->release(ref);
81598
81599diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
81600index e56b07f..aef789b 100644
81601--- a/include/linux/perf_event.h
81602+++ b/include/linux/perf_event.h
81603@@ -328,8 +328,8 @@ struct perf_event {
81604
81605 enum perf_event_active_state state;
81606 unsigned int attach_state;
81607- local64_t count;
81608- atomic64_t child_count;
81609+ local64_t count; /* PaX: fix it one day */
81610+ atomic64_unchecked_t child_count;
81611
81612 /*
81613 * These are the total time in nanoseconds that the event
81614@@ -380,8 +380,8 @@ struct perf_event {
81615 * These accumulate total time (in nanoseconds) that children
81616 * events have been enabled and running, respectively.
81617 */
81618- atomic64_t child_total_time_enabled;
81619- atomic64_t child_total_time_running;
81620+ atomic64_unchecked_t child_total_time_enabled;
81621+ atomic64_unchecked_t child_total_time_running;
81622
81623 /*
81624 * Protect attach/detach and child_list:
81625@@ -708,7 +708,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
81626 entry->ip[entry->nr++] = ip;
81627 }
81628
81629-extern int sysctl_perf_event_paranoid;
81630+extern int sysctl_perf_event_legitimately_concerned;
81631 extern int sysctl_perf_event_mlock;
81632 extern int sysctl_perf_event_sample_rate;
81633 extern int sysctl_perf_cpu_time_max_percent;
81634@@ -723,19 +723,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
81635 loff_t *ppos);
81636
81637
81638+static inline bool perf_paranoid_any(void)
81639+{
81640+ return sysctl_perf_event_legitimately_concerned > 2;
81641+}
81642+
81643 static inline bool perf_paranoid_tracepoint_raw(void)
81644 {
81645- return sysctl_perf_event_paranoid > -1;
81646+ return sysctl_perf_event_legitimately_concerned > -1;
81647 }
81648
81649 static inline bool perf_paranoid_cpu(void)
81650 {
81651- return sysctl_perf_event_paranoid > 0;
81652+ return sysctl_perf_event_legitimately_concerned > 0;
81653 }
81654
81655 static inline bool perf_paranoid_kernel(void)
81656 {
81657- return sysctl_perf_event_paranoid > 1;
81658+ return sysctl_perf_event_legitimately_concerned > 1;
81659 }
81660
81661 extern void perf_event_init(void);
81662@@ -851,7 +856,7 @@ struct perf_pmu_events_attr {
81663 struct device_attribute attr;
81664 u64 id;
81665 const char *event_str;
81666-};
81667+} __do_const;
81668
81669 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
81670 static struct perf_pmu_events_attr _var = { \
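
Besides the pointed rename of sysctl_perf_event_paranoid, the perf hunk adds one level on top of the stock ladder: -1 unrestricted, 0 deny raw tracepoint access, 1 deny CPU-wide events, 2 deny kernel profiling, and the new >2 check perf_paranoid_any(), which GRKERNSEC_PERF_HARDEN uses to refuse unprivileged perf_event_open() outright. The threshold helpers, restated as a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static int paranoid_level = 3;  /* the hardened default */

    static bool perf_paranoid_tracepoint_raw(void) { return paranoid_level > -1; }
    static bool perf_paranoid_cpu(void)            { return paranoid_level > 0;  }
    static bool perf_paranoid_kernel(void)         { return paranoid_level > 1;  }
    static bool perf_paranoid_any(void)            { return paranoid_level > 2;  }

    int main(void)
    {
        printf("%d %d %d %d\n", perf_paranoid_tracepoint_raw(),
               perf_paranoid_cpu(), perf_paranoid_kernel(),
               perf_paranoid_any());
        if (perf_paranoid_any())
            puts("unprivileged perf_event_open() would be refused");
        return 0;
    }
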
81671diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
81672index 7246ef3..1539ea4 100644
81673--- a/include/linux/pid_namespace.h
81674+++ b/include/linux/pid_namespace.h
81675@@ -43,7 +43,7 @@ struct pid_namespace {
81676 int hide_pid;
81677 int reboot; /* group exit code if this pidns was rebooted */
81678 unsigned int proc_inum;
81679-};
81680+} __randomize_layout;
81681
81682 extern struct pid_namespace init_pid_ns;
81683
81684diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
81685index ab57526..94598804 100644
81686--- a/include/linux/pipe_fs_i.h
81687+++ b/include/linux/pipe_fs_i.h
81688@@ -47,10 +47,10 @@ struct pipe_inode_info {
81689 struct mutex mutex;
81690 wait_queue_head_t wait;
81691 unsigned int nrbufs, curbuf, buffers;
81692- unsigned int readers;
81693- unsigned int writers;
81694- unsigned int files;
81695- unsigned int waiting_writers;
81696+ atomic_t readers;
81697+ atomic_t writers;
81698+ atomic_t files;
81699+ atomic_t waiting_writers;
81700 unsigned int r_counter;
81701 unsigned int w_counter;
81702 struct page *tmp_page;
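
The pipe_inode_info counters become atomic_t because they are modified from multiple contexts: a plain readers++ is a load/modify/store sequence, and two CPUs racing through pipe open/release can lose an update, leaving a counter that never reaches zero (or reaches it early). The shape of the fix, sketched with C11 atomics standing in for the kernel's atomic_t API:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int readers;

    /* Each operation is a single indivisible read-modify-write, so
     * concurrent opens and releases can no longer lose updates. */
    static void pipe_open_demo(void)    { atomic_fetch_add(&readers, 1); }
    static void pipe_release_demo(void) { atomic_fetch_sub(&readers, 1); }

    int main(void)
    {
        pipe_open_demo();
        pipe_open_demo();
        pipe_release_demo();
        printf("readers=%d\n", atomic_load(&readers));
        return 0;
    }
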
81703diff --git a/include/linux/pm.h b/include/linux/pm.h
81704index 8c6583a..febb84c 100644
81705--- a/include/linux/pm.h
81706+++ b/include/linux/pm.h
81707@@ -597,6 +597,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
81708 struct dev_pm_domain {
81709 struct dev_pm_ops ops;
81710 };
81711+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
81712
81713 /*
81714 * The PM_EVENT_ messages are also used by drivers implementing the legacy
81715diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
81716index 7c1d252..0e7061d 100644
81717--- a/include/linux/pm_domain.h
81718+++ b/include/linux/pm_domain.h
81719@@ -44,11 +44,11 @@ struct gpd_dev_ops {
81720 int (*thaw_early)(struct device *dev);
81721 int (*thaw)(struct device *dev);
81722 bool (*active_wakeup)(struct device *dev);
81723-};
81724+} __no_const;
81725
81726 struct gpd_cpu_data {
81727 unsigned int saved_exit_latency;
81728- struct cpuidle_state *idle_state;
81729+ cpuidle_state_no_const *idle_state;
81730 };
81731
81732 struct generic_pm_domain {
81733diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
81734index 16c9a62..f9f0838 100644
81735--- a/include/linux/pm_runtime.h
81736+++ b/include/linux/pm_runtime.h
81737@@ -109,7 +109,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
81738
81739 static inline void pm_runtime_mark_last_busy(struct device *dev)
81740 {
81741- ACCESS_ONCE(dev->power.last_busy) = jiffies;
81742+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
81743 }
81744
81745 #else /* !CONFIG_PM_RUNTIME */
81746diff --git a/include/linux/pnp.h b/include/linux/pnp.h
81747index 195aafc..49a7bc2 100644
81748--- a/include/linux/pnp.h
81749+++ b/include/linux/pnp.h
81750@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
81751 struct pnp_fixup {
81752 char id[7];
81753 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
81754-};
81755+} __do_const;
81756
81757 /* config parameters */
81758 #define PNP_CONFIG_NORMAL 0x0001
81759diff --git a/include/linux/poison.h b/include/linux/poison.h
81760index 2110a81..13a11bb 100644
81761--- a/include/linux/poison.h
81762+++ b/include/linux/poison.h
81763@@ -19,8 +19,8 @@
81764 * under normal circumstances, used to verify that nobody uses
81765 * non-initialized list entries.
81766 */
81767-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
81768-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
81769+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
81770+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
81771
81772 /********** include/linux/timer.h **********/
81773 /*
81774diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
81775index d8b187c3..9a9257a 100644
81776--- a/include/linux/power/smartreflex.h
81777+++ b/include/linux/power/smartreflex.h
81778@@ -238,7 +238,7 @@ struct omap_sr_class_data {
81779 int (*notify)(struct omap_sr *sr, u32 status);
81780 u8 notify_flags;
81781 u8 class_type;
81782-};
81783+} __do_const;
81784
81785 /**
81786 * struct omap_sr_nvalue_table - Smartreflex n-target value info
81787diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
81788index 4ea1d37..80f4b33 100644
81789--- a/include/linux/ppp-comp.h
81790+++ b/include/linux/ppp-comp.h
81791@@ -84,7 +84,7 @@ struct compressor {
81792 struct module *owner;
81793 /* Extra skb space needed by the compressor algorithm */
81794 unsigned int comp_extra;
81795-};
81796+} __do_const;
81797
81798 /*
81799 * The return value from decompress routine is the length of the
81800diff --git a/include/linux/preempt.h b/include/linux/preempt.h
81801index 1841b58..fbeebf8 100644
81802--- a/include/linux/preempt.h
81803+++ b/include/linux/preempt.h
81804@@ -29,11 +29,16 @@ extern void preempt_count_sub(int val);
81805 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
81806 #endif
81807
81808+#define raw_preempt_count_add(val) __preempt_count_add(val)
81809+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
81810+
81811 #define __preempt_count_inc() __preempt_count_add(1)
81812 #define __preempt_count_dec() __preempt_count_sub(1)
81813
81814 #define preempt_count_inc() preempt_count_add(1)
81815+#define raw_preempt_count_inc() raw_preempt_count_add(1)
81816 #define preempt_count_dec() preempt_count_sub(1)
81817+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
81818
81819 #ifdef CONFIG_PREEMPT_COUNT
81820
81821@@ -43,6 +48,12 @@ do { \
81822 barrier(); \
81823 } while (0)
81824
81825+#define raw_preempt_disable() \
81826+do { \
81827+ raw_preempt_count_inc(); \
81828+ barrier(); \
81829+} while (0)
81830+
81831 #define sched_preempt_enable_no_resched() \
81832 do { \
81833 barrier(); \
81834@@ -51,6 +62,12 @@ do { \
81835
81836 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
81837
81838+#define raw_preempt_enable_no_resched() \
81839+do { \
81840+ barrier(); \
81841+ raw_preempt_count_dec(); \
81842+} while (0)
81843+
81844 #ifdef CONFIG_PREEMPT
81845 #define preempt_enable() \
81846 do { \
81847@@ -115,8 +132,10 @@ do { \
81848 * region.
81849 */
81850 #define preempt_disable() barrier()
81851+#define raw_preempt_disable() barrier()
81852 #define sched_preempt_enable_no_resched() barrier()
81853 #define preempt_enable_no_resched() barrier()
81854+#define raw_preempt_enable_no_resched() barrier()
81855 #define preempt_enable() barrier()
81856 #define preempt_check_resched() do { } while (0)
81857
81858@@ -130,11 +149,13 @@ do { \
81859 /*
81860 * Modules have no business playing preemption tricks.
81861 */
81862+#ifndef CONFIG_PAX_KERNEXEC
81863 #undef sched_preempt_enable_no_resched
81864 #undef preempt_enable_no_resched
81865 #undef preempt_enable_no_resched_notrace
81866 #undef preempt_check_resched
81867 #endif
81868+#endif
81869
81870 #define preempt_set_need_resched() \
81871 do { \
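
The raw_preempt_* additions give PaX code a way to adjust the preempt count through the bare __preempt_count_add()/__preempt_count_sub() primitives, bypassing preempt_count_add()/sub(), which can carry tracing and debug checking; the likely intent is to keep paths such as fault reporting from re-entering that instrumentation (an inference from the naming, not stated in the patch). The layering, mimicked on a host:

    #include <stdio.h>

    static int preempt_count;

    /* Bare primitive, roughly what __preempt_count_add() amounts to. */
    #define __preempt_count_add_demo(val) (preempt_count += (val))

    /* Instrumented wrapper: note the extra hook a debug kernel attaches. */
    static void preempt_count_add_demo(int val)
    {
        __preempt_count_add_demo(val);
        fprintf(stderr, "trace: preempt_count=%d\n", preempt_count);
    }

    /* The new raw variant goes straight to the primitive. */
    #define raw_preempt_disable_demo()   \
    do {                                 \
        __preempt_count_add_demo(1);     \
    } while (0)

    int main(void)
    {
        preempt_count_add_demo(1);   /* passes through the hook */
        raw_preempt_disable_demo();  /* silent */
        printf("count=%d\n", preempt_count);
        return 0;
    }
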
81872diff --git a/include/linux/printk.h b/include/linux/printk.h
81873index fa47e27..c08e034 100644
81874--- a/include/linux/printk.h
81875+++ b/include/linux/printk.h
81876@@ -114,6 +114,8 @@ static inline __printf(1, 2) __cold
81877 void early_printk(const char *s, ...) { }
81878 #endif
81879
81880+extern int kptr_restrict;
81881+
81882 #ifdef CONFIG_PRINTK
81883 asmlinkage __printf(5, 0)
81884 int vprintk_emit(int facility, int level,
81885@@ -148,7 +150,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
81886
81887 extern int printk_delay_msec;
81888 extern int dmesg_restrict;
81889-extern int kptr_restrict;
81890
81891 extern void wake_up_klogd(void);
81892
81893diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
81894index 608e60a..bbcb1a0 100644
81895--- a/include/linux/proc_fs.h
81896+++ b/include/linux/proc_fs.h
81897@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
81898 extern struct proc_dir_entry *proc_symlink(const char *,
81899 struct proc_dir_entry *, const char *);
81900 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
81901+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
81902 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
81903 struct proc_dir_entry *, void *);
81904+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
81905+ struct proc_dir_entry *, void *);
81906 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
81907 struct proc_dir_entry *);
81908
81909@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
81910 return proc_create_data(name, mode, parent, proc_fops, NULL);
81911 }
81912
81913+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
81914+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
81915+{
81916+#ifdef CONFIG_GRKERNSEC_PROC_USER
81917+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
81918+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81919+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
81920+#else
81921+ return proc_create_data(name, mode, parent, proc_fops, NULL);
81922+#endif
81923+}
81924+
81925+
81926 extern void proc_set_size(struct proc_dir_entry *, loff_t);
81927 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
81928 extern void *PDE_DATA(const struct inode *);
81929@@ -73,7 +89,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
81930 static inline struct proc_dir_entry *proc_net_mkdir(
81931 struct net *net, const char *name, struct proc_dir_entry *parent)
81932 {
81933- return proc_mkdir_data(name, 0, parent, net);
81934+ return proc_mkdir_data_restrict(name, 0, parent, net);
81935 }
81936
81937 #endif /* _LINUX_PROC_FS_H */
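
proc_create_grsec() collapses the caller's requested mode according to the /proc restriction options: with GRKERNSEC_PROC_USER the entry becomes readable by root only, with GRKERNSEC_PROC_USERGROUP by root plus one configured group, and otherwise the requested mode is used unchanged. The selection logic, restated as a testable function with the config options modeled as plain flags:

    #include <stdio.h>
    #include <sys/stat.h>

    static mode_t grsec_proc_mode(mode_t requested,
                                  int proc_user, int proc_usergroup)
    {
        if (proc_user)              /* CONFIG_GRKERNSEC_PROC_USER */
            return S_IRUSR;
        if (proc_usergroup)         /* CONFIG_GRKERNSEC_PROC_USERGROUP */
            return S_IRUSR | S_IRGRP;
        return requested;           /* stock behaviour */
    }

    int main(void)
    {
        /* A world-readable request is narrowed to 0400 under PROC_USER. */
        printf("%04o\n",
               (unsigned)grsec_proc_mode(S_IRUSR | S_IRGRP | S_IROTH, 1, 0));
        return 0;
    }
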
81938diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
81939index 34a1e10..70f6bde 100644
81940--- a/include/linux/proc_ns.h
81941+++ b/include/linux/proc_ns.h
81942@@ -14,7 +14,7 @@ struct proc_ns_operations {
81943 void (*put)(void *ns);
81944 int (*install)(struct nsproxy *nsproxy, void *ns);
81945 unsigned int (*inum)(void *ns);
81946-};
81947+} __do_const __randomize_layout;
81948
81949 struct proc_ns {
81950 void *ns;
81951diff --git a/include/linux/quota.h b/include/linux/quota.h
81952index cc7494a..1e27036 100644
81953--- a/include/linux/quota.h
81954+++ b/include/linux/quota.h
81955@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
81956
81957 extern bool qid_eq(struct kqid left, struct kqid right);
81958 extern bool qid_lt(struct kqid left, struct kqid right);
81959-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
81960+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
81961 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
81962 extern bool qid_valid(struct kqid qid);
81963
81964diff --git a/include/linux/random.h b/include/linux/random.h
81965index 1cfce0e..b0b9235 100644
81966--- a/include/linux/random.h
81967+++ b/include/linux/random.h
81968@@ -9,9 +9,19 @@
81969 #include <uapi/linux/random.h>
81970
81971 extern void add_device_randomness(const void *, unsigned int);
81972+
81973+static inline void add_latent_entropy(void)
81974+{
81975+
81976+#ifdef LATENT_ENTROPY_PLUGIN
81977+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
81978+#endif
81979+
81980+}
81981+
81982 extern void add_input_randomness(unsigned int type, unsigned int code,
81983- unsigned int value);
81984-extern void add_interrupt_randomness(int irq, int irq_flags);
81985+ unsigned int value) __latent_entropy;
81986+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
81987
81988 extern void get_random_bytes(void *buf, int nbytes);
81989 extern void get_random_bytes_arch(void *buf, int nbytes);
81990@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
81991 extern const struct file_operations random_fops, urandom_fops;
81992 #endif
81993
81994-unsigned int get_random_int(void);
81995+unsigned int __intentional_overflow(-1) get_random_int(void);
81996 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
81997
81998-u32 prandom_u32(void);
81999+u32 prandom_u32(void) __intentional_overflow(-1);
82000 void prandom_bytes(void *buf, int nbytes);
82001 void prandom_seed(u32 seed);
82002 void prandom_reseed_late(void);
82003@@ -37,6 +47,11 @@ struct rnd_state {
82004 u32 prandom_u32_state(struct rnd_state *state);
82005 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
82006
82007+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
82008+{
82009+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
82010+}
82011+
82012 /**
82013 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
82014 * @ep_ro: right open interval endpoint
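
pax_get_random_long() above widens the PRNG for ASLR use: on 64-bit builds it splices two independent 32-bit prandom_u32() draws into one long, and on 32-bit the conditional's false arm degenerates it to a single draw (the sizeof guard keeps the wide shift from ever executing there). The same expression on an LP64 host, with rand() standing in for prandom_u32():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long pax_get_random_long_demo(void)
    {
        /* Two 32-bit draws spliced into one 64-bit value; the ternary
         * mirrors the patch and drops the high half when long is 32 bits. */
        return (uint32_t)rand() +
               (sizeof(long) > 4 ? (unsigned long)(uint32_t)rand() << 32 : 0);
    }

    int main(void)
    {
        srand(1);
        printf("%#lx\n", pax_get_random_long_demo());
        return 0;
    }
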
82015diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
82016index fea49b5..2ac22bb 100644
82017--- a/include/linux/rbtree_augmented.h
82018+++ b/include/linux/rbtree_augmented.h
82019@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
82020 old->rbaugmented = rbcompute(old); \
82021 } \
82022 rbstatic const struct rb_augment_callbacks rbname = { \
82023- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
82024+ .propagate = rbname ## _propagate, \
82025+ .copy = rbname ## _copy, \
82026+ .rotate = rbname ## _rotate \
82027 };
82028
82029
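
The rbtree hunk swaps positional initializers for designated ones. That is not cosmetic: the struct-reordering and constification plugins elsewhere in this patch make member order a build-time variable, and a positional initializer list silently fills the wrong slots the moment members move, while .name = value pairs stay correct under any layout:

    #include <stdio.h>

    struct callbacks {
        void (*propagate)(void);
        void (*copy)(void);
        void (*rotate)(void);
    };

    static void f_propagate(void) { puts("propagate"); }
    static void f_copy(void)      { puts("copy"); }
    static void f_rotate(void)    { puts("rotate"); }

    /* Positional form `{ f_propagate, f_copy, f_rotate }` depends on member
     * order; the designated form below survives any reordering. */
    static const struct callbacks cbs = {
        .propagate = f_propagate,
        .copy      = f_copy,
        .rotate    = f_rotate,
    };

    int main(void)
    {
        cbs.rotate();
        return 0;
    }
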
82030diff --git a/include/linux/rculist.h b/include/linux/rculist.h
82031index dbaf990..52e07b8 100644
82032--- a/include/linux/rculist.h
82033+++ b/include/linux/rculist.h
82034@@ -29,8 +29,8 @@
82035 */
82036 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
82037 {
82038- ACCESS_ONCE(list->next) = list;
82039- ACCESS_ONCE(list->prev) = list;
82040+ ACCESS_ONCE_RW(list->next) = list;
82041+ ACCESS_ONCE_RW(list->prev) = list;
82042 }
82043
82044 /*
82045@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
82046 struct list_head *prev, struct list_head *next);
82047 #endif
82048
82049+void __pax_list_add_rcu(struct list_head *new,
82050+ struct list_head *prev, struct list_head *next);
82051+
82052 /**
82053 * list_add_rcu - add a new entry to rcu-protected list
82054 * @new: new entry to be added
82055@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
82056 __list_add_rcu(new, head, head->next);
82057 }
82058
82059+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
82060+{
82061+ __pax_list_add_rcu(new, head, head->next);
82062+}
82063+
82064 /**
82065 * list_add_tail_rcu - add a new entry to rcu-protected list
82066 * @new: new entry to be added
82067@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
82068 __list_add_rcu(new, head->prev, head);
82069 }
82070
82071+static inline void pax_list_add_tail_rcu(struct list_head *new,
82072+ struct list_head *head)
82073+{
82074+ __pax_list_add_rcu(new, head->prev, head);
82075+}
82076+
82077 /**
82078 * list_del_rcu - deletes entry from list without re-initialization
82079 * @entry: the element to delete from the list.
82080@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
82081 entry->prev = LIST_POISON2;
82082 }
82083
82084+extern void pax_list_del_rcu(struct list_head *entry);
82085+
82086 /**
82087 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
82088 * @n: the element to delete from the hash list.
82089diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
82090index 72bf3a0..853347f 100644
82091--- a/include/linux/rcupdate.h
82092+++ b/include/linux/rcupdate.h
82093@@ -588,7 +588,7 @@ static inline void rcu_preempt_sleep_check(void)
82094 #define rcu_assign_pointer(p, v) \
82095 do { \
82096 smp_wmb(); \
82097- ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
82098+ ACCESS_ONCE_RW(p) = RCU_INITIALIZER(v); \
82099 } while (0)
82100
82101
82102diff --git a/include/linux/reboot.h b/include/linux/reboot.h
82103index 9e7db9e..7d4fd72 100644
82104--- a/include/linux/reboot.h
82105+++ b/include/linux/reboot.h
82106@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
82107 */
82108
82109 extern void migrate_to_reboot_cpu(void);
82110-extern void machine_restart(char *cmd);
82111-extern void machine_halt(void);
82112-extern void machine_power_off(void);
82113+extern void machine_restart(char *cmd) __noreturn;
82114+extern void machine_halt(void) __noreturn;
82115+extern void machine_power_off(void) __noreturn;
82116
82117 extern void machine_shutdown(void);
82118 struct pt_regs;
82119@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
82120 */
82121
82122 extern void kernel_restart_prepare(char *cmd);
82123-extern void kernel_restart(char *cmd);
82124-extern void kernel_halt(void);
82125-extern void kernel_power_off(void);
82126+extern void kernel_restart(char *cmd) __noreturn;
82127+extern void kernel_halt(void) __noreturn;
82128+extern void kernel_power_off(void) __noreturn;
82129
82130 extern int C_A_D; /* for sysctl */
82131 void ctrl_alt_del(void);
82132@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
82133 * Emergency restart, callable from an interrupt handler.
82134 */
82135
82136-extern void emergency_restart(void);
82137+extern void emergency_restart(void) __noreturn;
82138 #include <asm/emergency-restart.h>
82139
82140 #endif /* _LINUX_REBOOT_H */
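
Marking the halt/restart/power-off entry points __noreturn is both documentation and hardening support: the compiler may prune the unreachable code after each call, warns if a supposedly terminal function can fall through, and control-flow analyses become more precise. The attribute in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    /* The kernel's __noreturn is a spelling of this GCC attribute. */
    #define __noreturn __attribute__((noreturn))

    static __noreturn void machine_halt_demo(void)
    {
        fputs("halting\n", stderr);
        exit(0);   /* must never return; falling through would be flagged */
    }

    int main(void)
    {
        machine_halt_demo();
        /* everything past this point is provably dead code */
    }
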
82141diff --git a/include/linux/regset.h b/include/linux/regset.h
82142index 8e0c9fe..ac4d221 100644
82143--- a/include/linux/regset.h
82144+++ b/include/linux/regset.h
82145@@ -161,7 +161,8 @@ struct user_regset {
82146 unsigned int align;
82147 unsigned int bias;
82148 unsigned int core_note_type;
82149-};
82150+} __do_const;
82151+typedef struct user_regset __no_const user_regset_no_const;
82152
82153 /**
82154 * struct user_regset_view - available regsets
82155diff --git a/include/linux/relay.h b/include/linux/relay.h
82156index d7c8359..818daf5 100644
82157--- a/include/linux/relay.h
82158+++ b/include/linux/relay.h
82159@@ -157,7 +157,7 @@ struct rchan_callbacks
82160 * The callback should return 0 if successful, negative if not.
82161 */
82162 int (*remove_buf_file)(struct dentry *dentry);
82163-};
82164+} __no_const;
82165
82166 /*
82167 * CONFIG_RELAY kernel API, kernel/relay.c
82168diff --git a/include/linux/rio.h b/include/linux/rio.h
82169index b71d573..2f940bd 100644
82170--- a/include/linux/rio.h
82171+++ b/include/linux/rio.h
82172@@ -355,7 +355,7 @@ struct rio_ops {
82173 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
82174 u64 rstart, u32 size, u32 flags);
82175 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
82176-};
82177+} __no_const;
82178
82179 #define RIO_RESOURCE_MEM 0x00000100
82180 #define RIO_RESOURCE_DOORBELL 0x00000200
82181diff --git a/include/linux/rmap.h b/include/linux/rmap.h
82182index b66c211..13d2915 100644
82183--- a/include/linux/rmap.h
82184+++ b/include/linux/rmap.h
82185@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
82186 void anon_vma_init(void); /* create anon_vma_cachep */
82187 int anon_vma_prepare(struct vm_area_struct *);
82188 void unlink_anon_vmas(struct vm_area_struct *);
82189-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
82190-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
82191+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
82192+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
82193
82194 static inline void anon_vma_merge(struct vm_area_struct *vma,
82195 struct vm_area_struct *next)
82196diff --git a/include/linux/sched.h b/include/linux/sched.h
82197index ccd0c6f..39c28a4 100644
82198--- a/include/linux/sched.h
82199+++ b/include/linux/sched.h
82200@@ -129,6 +129,7 @@ struct fs_struct;
82201 struct perf_event_context;
82202 struct blk_plug;
82203 struct filename;
82204+struct linux_binprm;
82205
82206 /*
82207 * List of flags we want to share for kernel threads,
82208@@ -369,7 +370,7 @@ extern char __sched_text_start[], __sched_text_end[];
82209 extern int in_sched_functions(unsigned long addr);
82210
82211 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
82212-extern signed long schedule_timeout(signed long timeout);
82213+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
82214 extern signed long schedule_timeout_interruptible(signed long timeout);
82215 extern signed long schedule_timeout_killable(signed long timeout);
82216 extern signed long schedule_timeout_uninterruptible(signed long timeout);
82217@@ -380,6 +381,19 @@ struct nsproxy;
82218 struct user_namespace;
82219
82220 #ifdef CONFIG_MMU
82221+
82222+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82223+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
82224+#else
82225+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82226+{
82227+ return 0;
82228+}
82229+#endif
82230+
82231+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
82232+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
82233+
82234 extern void arch_pick_mmap_layout(struct mm_struct *mm);
82235 extern unsigned long
82236 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
82237@@ -677,6 +691,17 @@ struct signal_struct {
82238 #ifdef CONFIG_TASKSTATS
82239 struct taskstats *stats;
82240 #endif
82241+
82242+#ifdef CONFIG_GRKERNSEC
82243+ u32 curr_ip;
82244+ u32 saved_ip;
82245+ u32 gr_saddr;
82246+ u32 gr_daddr;
82247+ u16 gr_sport;
82248+ u16 gr_dport;
82249+ u8 used_accept:1;
82250+#endif
82251+
82252 #ifdef CONFIG_AUDIT
82253 unsigned audit_tty;
82254 unsigned audit_tty_log_passwd;
82255@@ -703,7 +728,7 @@ struct signal_struct {
82256 struct mutex cred_guard_mutex; /* guard against foreign influences on
82257 * credential calculations
82258 * (notably. ptrace) */
82259-};
82260+} __randomize_layout;
82261
82262 /*
82263 * Bits in flags field of signal_struct.
82264@@ -757,6 +782,14 @@ struct user_struct {
82265 struct key *session_keyring; /* UID's default session keyring */
82266 #endif
82267
82268+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82269+ unsigned char kernel_banned;
82270+#endif
82271+#ifdef CONFIG_GRKERNSEC_BRUTE
82272+ unsigned char suid_banned;
82273+ unsigned long suid_ban_expires;
82274+#endif
82275+
82276 /* Hash table maintenance information */
82277 struct hlist_node uidhash_node;
82278 kuid_t uid;
82279@@ -764,7 +797,7 @@ struct user_struct {
82280 #ifdef CONFIG_PERF_EVENTS
82281 atomic_long_t locked_vm;
82282 #endif
82283-};
82284+} __randomize_layout;
82285
82286 extern int uids_sysfs_init(void);
82287
82288@@ -1286,8 +1319,8 @@ struct task_struct {
82289 struct list_head thread_node;
82290
82291 struct completion *vfork_done; /* for vfork() */
82292- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
82293- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
82294+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
82295+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
82296
82297 cputime_t utime, stime, utimescaled, stimescaled;
82298 cputime_t gtime;
82299@@ -1312,11 +1345,6 @@ struct task_struct {
82300 struct task_cputime cputime_expires;
82301 struct list_head cpu_timers[3];
82302
82303-/* process credentials */
82304- const struct cred __rcu *real_cred; /* objective and real subjective task
82305- * credentials (COW) */
82306- const struct cred __rcu *cred; /* effective (overridable) subjective task
82307- * credentials (COW) */
82308 char comm[TASK_COMM_LEN]; /* executable name excluding path
82309 - access with [gs]et_task_comm (which lock
82310 it with task_lock())
82311@@ -1333,6 +1361,10 @@ struct task_struct {
82312 #endif
82313 /* CPU-specific state of this task */
82314 struct thread_struct thread;
82315+/* thread_info moved to task_struct */
82316+#ifdef CONFIG_X86
82317+ struct thread_info tinfo;
82318+#endif
82319 /* filesystem information */
82320 struct fs_struct *fs;
82321 /* open file information */
82322@@ -1409,6 +1441,10 @@ struct task_struct {
82323 gfp_t lockdep_reclaim_gfp;
82324 #endif
82325
82326+/* process credentials */
82327+ const struct cred __rcu *real_cred; /* objective and real subjective task
82328+ * credentials (COW) */
82329+
82330 /* journalling filesystem info */
82331 void *journal_info;
82332
82333@@ -1447,6 +1483,10 @@ struct task_struct {
82334 /* cg_list protected by css_set_lock and tsk->alloc_lock */
82335 struct list_head cg_list;
82336 #endif
82337+
82338+ const struct cred __rcu *cred; /* effective (overridable) subjective task
82339+ * credentials (COW) */
82340+
82341 #ifdef CONFIG_FUTEX
82342 struct robust_list_head __user *robust_list;
82343 #ifdef CONFIG_COMPAT
82344@@ -1581,7 +1621,78 @@ struct task_struct {
82345 unsigned int sequential_io;
82346 unsigned int sequential_io_avg;
82347 #endif
82348-};
82349+
82350+#ifdef CONFIG_GRKERNSEC
82351+ /* grsecurity */
82352+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82353+ u64 exec_id;
82354+#endif
82355+#ifdef CONFIG_GRKERNSEC_SETXID
82356+ const struct cred *delayed_cred;
82357+#endif
82358+ struct dentry *gr_chroot_dentry;
82359+ struct acl_subject_label *acl;
82360+ struct acl_subject_label *tmpacl;
82361+ struct acl_role_label *role;
82362+ struct file *exec_file;
82363+ unsigned long brute_expires;
82364+ u16 acl_role_id;
82365+ u8 inherited;
82366+ /* is this the task that authenticated to the special role */
82367+ u8 acl_sp_role;
82368+ u8 is_writable;
82369+ u8 brute;
82370+ u8 gr_is_chrooted;
82371+#endif
82372+
82373+} __randomize_layout;
82374+
82375+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
82376+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
82377+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
82378+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
82379+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
82380+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
82381+
82382+#ifdef CONFIG_PAX_SOFTMODE
82383+extern int pax_softmode;
82384+#endif
82385+
82386+extern int pax_check_flags(unsigned long *);
82387+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
82388+
82389+/* if tsk != current then task_lock must be held on it */
82390+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
82391+static inline unsigned long pax_get_flags(struct task_struct *tsk)
82392+{
82393+ if (likely(tsk->mm))
82394+ return tsk->mm->pax_flags;
82395+ else
82396+ return 0UL;
82397+}
82398+
82399+/* if tsk != current then task_lock must be held on it */
82400+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
82401+{
82402+ if (likely(tsk->mm)) {
82403+ tsk->mm->pax_flags = flags;
82404+ return 0;
82405+ }
82406+ return -EINVAL;
82407+}
82408+#endif
82409+
82410+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
82411+extern void pax_set_initial_flags(struct linux_binprm *bprm);
82412+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
82413+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
82414+#endif
82415+
82416+struct path;
82417+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
82418+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
82419+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
82420+extern void pax_report_refcount_overflow(struct pt_regs *regs);
82421
82422 /* Future-safe accessor for struct task_struct's cpus_allowed. */
82423 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
82424@@ -1658,7 +1769,7 @@ struct pid_namespace;
82425 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
82426 struct pid_namespace *ns);
82427
82428-static inline pid_t task_pid_nr(struct task_struct *tsk)
82429+static inline pid_t task_pid_nr(const struct task_struct *tsk)
82430 {
82431 return tsk->pid;
82432 }
82433@@ -2006,6 +2117,25 @@ extern u64 sched_clock_cpu(int cpu);
82434
82435 extern void sched_clock_init(void);
82436
82437+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
82438+static inline void populate_stack(void)
82439+{
82440+ struct task_struct *curtask = current;
82441+ int c;
82442+ int *ptr = curtask->stack;
82443+ int *end = curtask->stack + THREAD_SIZE;
82444+
82445+ while (ptr < end) {
82446+ c = *(volatile int *)ptr;
82447+ ptr += PAGE_SIZE/sizeof(int);
82448+ }
82449+}
82450+#else
82451+static inline void populate_stack(void)
82452+{
82453+}
82454+#endif
82455+
82456 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
82457 static inline void sched_clock_tick(void)
82458 {
82459@@ -2130,7 +2260,9 @@ void yield(void);
82460 extern struct exec_domain default_exec_domain;
82461
82462 union thread_union {
82463+#ifndef CONFIG_X86
82464 struct thread_info thread_info;
82465+#endif
82466 unsigned long stack[THREAD_SIZE/sizeof(long)];
82467 };
82468
82469@@ -2163,6 +2295,7 @@ extern struct pid_namespace init_pid_ns;
82470 */
82471
82472 extern struct task_struct *find_task_by_vpid(pid_t nr);
82473+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
82474 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
82475 struct pid_namespace *ns);
82476
82477@@ -2325,7 +2458,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
82478 extern void exit_itimers(struct signal_struct *);
82479 extern void flush_itimer_signals(void);
82480
82481-extern void do_group_exit(int);
82482+extern __noreturn void do_group_exit(int);
82483
82484 extern int allow_signal(int);
82485 extern int disallow_signal(int);
82486@@ -2526,9 +2659,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
82487
82488 #endif
82489
82490-static inline int object_is_on_stack(void *obj)
82491+static inline int object_starts_on_stack(void *obj)
82492 {
82493- void *stack = task_stack_page(current);
82494+ const void *stack = task_stack_page(current);
82495
82496 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
82497 }
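
populate_stack() above walks the task's kernel stack touching one int per page through a volatile read (volatile so the otherwise-dead loads survive optimization). With GRKERNSEC_KSTACKOVERFLOW the stacks come from vmalloc space, whose page-table entries can be synced into a process lazily on first fault, so pre-faulting every page makes the whole stack safely accessible in contexts that cannot tolerate such a fault; that rationale is inferred from the config option, not spelled out in the patch. The walk itself, made standalone with a heap buffer in place of current->stack:

    #include <stdlib.h>

    #define PAGE_SIZE_DEMO   4096UL
    #define THREAD_SIZE_DEMO (4 * PAGE_SIZE_DEMO)

    static void populate_stack_demo(void *stack)
    {
        int c;
        int *ptr = stack;
        int *end = (int *)((char *)stack + THREAD_SIZE_DEMO);

        while (ptr < end) {
            c = *(volatile int *)ptr;             /* fault the page in */
            ptr += PAGE_SIZE_DEMO / sizeof(int);  /* advance one page */
        }
        (void)c;
    }

    int main(void)
    {
        void *stack = calloc(1, THREAD_SIZE_DEMO);
        populate_stack_demo(stack);
        free(stack);
        return 0;
    }
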
82498diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
82499index 8045a55..c959cd5 100644
82500--- a/include/linux/sched/sysctl.h
82501+++ b/include/linux/sched/sysctl.h
82502@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
82503 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
82504
82505 extern int sysctl_max_map_count;
82506+extern unsigned long sysctl_heap_stack_gap;
82507
82508 extern unsigned int sysctl_sched_latency;
82509 extern unsigned int sysctl_sched_min_granularity;
82510diff --git a/include/linux/security.h b/include/linux/security.h
82511index 2fc42d1..4d802f2 100644
82512--- a/include/linux/security.h
82513+++ b/include/linux/security.h
82514@@ -27,6 +27,7 @@
82515 #include <linux/slab.h>
82516 #include <linux/err.h>
82517 #include <linux/string.h>
82518+#include <linux/grsecurity.h>
82519
82520 struct linux_binprm;
82521 struct cred;
82522@@ -116,8 +117,6 @@ struct seq_file;
82523
82524 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
82525
82526-void reset_security_ops(void);
82527-
82528 #ifdef CONFIG_MMU
82529 extern unsigned long mmap_min_addr;
82530 extern unsigned long dac_mmap_min_addr;
82531@@ -1719,7 +1718,7 @@ struct security_operations {
82532 struct audit_context *actx);
82533 void (*audit_rule_free) (void *lsmrule);
82534 #endif /* CONFIG_AUDIT */
82535-};
82536+} __randomize_layout;
82537
82538 /* prototypes */
82539 extern int security_init(void);
82540diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
82541index dc368b8..e895209 100644
82542--- a/include/linux/semaphore.h
82543+++ b/include/linux/semaphore.h
82544@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
82545 }
82546
82547 extern void down(struct semaphore *sem);
82548-extern int __must_check down_interruptible(struct semaphore *sem);
82549+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
82550 extern int __must_check down_killable(struct semaphore *sem);
82551 extern int __must_check down_trylock(struct semaphore *sem);
82552 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
82553diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
82554index 52e0097..383f21d 100644
82555--- a/include/linux/seq_file.h
82556+++ b/include/linux/seq_file.h
82557@@ -27,6 +27,9 @@ struct seq_file {
82558 struct mutex lock;
82559 const struct seq_operations *op;
82560 int poll_event;
82561+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82562+ u64 exec_id;
82563+#endif
82564 #ifdef CONFIG_USER_NS
82565 struct user_namespace *user_ns;
82566 #endif
82567@@ -39,6 +42,7 @@ struct seq_operations {
82568 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
82569 int (*show) (struct seq_file *m, void *v);
82570 };
82571+typedef struct seq_operations __no_const seq_operations_no_const;
82572
82573 #define SEQ_SKIP 1
82574
82575@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
82576
82577 char *mangle_path(char *s, const char *p, const char *esc);
82578 int seq_open(struct file *, const struct seq_operations *);
82579+int seq_open_restrict(struct file *, const struct seq_operations *);
82580 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
82581 loff_t seq_lseek(struct file *, loff_t, int);
82582 int seq_release(struct inode *, struct file *);
82583@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
82584 }
82585
82586 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
82587+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
82588 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
82589 int single_release(struct inode *, struct file *);
82590 void *__seq_open_private(struct file *, const struct seq_operations *, int);
82591diff --git a/include/linux/shm.h b/include/linux/shm.h
82592index 1e2cd2e..0288750 100644
82593--- a/include/linux/shm.h
82594+++ b/include/linux/shm.h
82595@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
82596
82597 /* The task created the shm object. NULL if the task is dead. */
82598 struct task_struct *shm_creator;
82599+#ifdef CONFIG_GRKERNSEC
82600+ time_t shm_createtime;
82601+ pid_t shm_lapid;
82602+#endif
82603 };
82604
82605 /* shm_mode upper byte flags */
82606diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
82607index 15ede6a..80161c3 100644
82608--- a/include/linux/skbuff.h
82609+++ b/include/linux/skbuff.h
82610@@ -662,7 +662,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
82611 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
82612 int node);
82613 struct sk_buff *build_skb(void *data, unsigned int frag_size);
82614-static inline struct sk_buff *alloc_skb(unsigned int size,
82615+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
82616 gfp_t priority)
82617 {
82618 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
82619@@ -1768,7 +1768,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
82620 return skb->inner_transport_header - skb->inner_network_header;
82621 }
82622
82623-static inline int skb_network_offset(const struct sk_buff *skb)
82624+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
82625 {
82626 return skb_network_header(skb) - skb->data;
82627 }
82628@@ -1828,7 +1828,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
82629 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
82630 */
82631 #ifndef NET_SKB_PAD
82632-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
82633+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
82634 #endif
82635
82636 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
82637@@ -2427,7 +2427,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
82638 int *err);
82639 unsigned int datagram_poll(struct file *file, struct socket *sock,
82640 struct poll_table_struct *wait);
82641-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
82642+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
82643 struct iovec *to, int size);
82644 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
82645 struct iovec *iov);
82646@@ -2721,6 +2721,9 @@ static inline void nf_reset(struct sk_buff *skb)
82647 nf_bridge_put(skb->nf_bridge);
82648 skb->nf_bridge = NULL;
82649 #endif
82650+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
82651+ skb->nf_trace = 0;
82652+#endif
82653 }
82654
82655 static inline void nf_reset_trace(struct sk_buff *skb)
82656diff --git a/include/linux/slab.h b/include/linux/slab.h
82657index b5b2df6..69f5734 100644
82658--- a/include/linux/slab.h
82659+++ b/include/linux/slab.h
82660@@ -14,15 +14,29 @@
82661 #include <linux/gfp.h>
82662 #include <linux/types.h>
82663 #include <linux/workqueue.h>
82664-
82665+#include <linux/err.h>
82666
82667 /*
82668 * Flags to pass to kmem_cache_create().
82669 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
82670 */
82671 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
82672+
82673+#ifdef CONFIG_PAX_USERCOPY_SLABS
82674+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
82675+#else
82676+#define SLAB_USERCOPY 0x00000000UL
82677+#endif
82678+
82679 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
82680 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
82681+
82682+#ifdef CONFIG_PAX_MEMORY_SANITIZE
82683+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
82684+#else
82685+#define SLAB_NO_SANITIZE 0x00000000UL
82686+#endif
82687+
82688 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
82689 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
82690 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
82691@@ -98,10 +112,13 @@
82692 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
82693 * Both make kfree a no-op.
82694 */
82695-#define ZERO_SIZE_PTR ((void *)16)
82696+#define ZERO_SIZE_PTR \
82697+({ \
82698+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
82699+ (void *)(-MAX_ERRNO-1L); \
82700+})
82701
82702-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
82703- (unsigned long)ZERO_SIZE_PTR)
82704+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
82705
82706 #include <linux/kmemleak.h>
82707
82708@@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
82709 void kfree(const void *);
82710 void kzfree(const void *);
82711 size_t ksize(const void *);
82712+const char *check_heap_object(const void *ptr, unsigned long n);
82713+bool is_usercopy_object(const void *ptr);
82714
82715 /*
82716 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
82717@@ -174,7 +193,7 @@ struct kmem_cache {
82718 unsigned int align; /* Alignment as calculated */
82719 unsigned long flags; /* Active flags on the slab */
82720 const char *name; /* Slab name for sysfs */
82721- int refcount; /* Use counter */
82722+ atomic_t refcount; /* Use counter */
82723 void (*ctor)(void *); /* Called on object slot creation */
82724 struct list_head list; /* List of all slab caches on the system */
82725 };
82726@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
82727 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
82728 #endif
82729
82730+#ifdef CONFIG_PAX_USERCOPY_SLABS
82731+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
82732+#endif
82733+
82734 /*
82735 * Figure out which kmalloc slab an allocation of a certain size
82736 * belongs to.
82737@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
82738 * 2 = 120 .. 192 bytes
82739 * n = 2^(n-1) .. 2^n -1
82740 */
82741-static __always_inline int kmalloc_index(size_t size)
82742+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
82743 {
82744 if (!size)
82745 return 0;
82746@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size)
82747 }
82748 #endif /* !CONFIG_SLOB */
82749
82750-void *__kmalloc(size_t size, gfp_t flags);
82751+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
82752 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
82753
82754 #ifdef CONFIG_NUMA
82755-void *__kmalloc_node(size_t size, gfp_t flags, int node);
82756+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
82757 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
82758 #else
82759 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
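Relocating ZERO_SIZE_PTR from (void *)16 to just below the error-pointer range lets the rewritten ZERO_OR_NULL_PTR reject NULL, the zero-size token, and every ERR_PTR value with a single unsigned comparison. A userspace sketch of the arithmetic, with MAX_ERRNO mirroring the kernel's 4095 and the hunk's BUILD_BUG_ON dropped:

#include <stdio.h>

#define MAX_ERRNO 4095UL
#define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
        void *err = (void *)-12L;   /* the kernel's ERR_PTR(-ENOMEM) */
        char obj[16];               /* stand-in for a real heap object */

        printf("NULL          -> %d\n", ZERO_OR_NULL_PTR((void *)0));     /* 1 */
        printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
        printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR(err));           /* 1 */
        printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(obj));           /* 0 */
        return 0;
}

NULL passes because the unsigned subtraction wraps to ULONG_MAX, so the one test covers both ends of the address space.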
82760diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
82761index 8235dfb..47ce586 100644
82762--- a/include/linux/slab_def.h
82763+++ b/include/linux/slab_def.h
82764@@ -38,7 +38,7 @@ struct kmem_cache {
82765 /* 4) cache creation/removal */
82766 const char *name;
82767 struct list_head list;
82768- int refcount;
82769+ atomic_t refcount;
82770 int object_size;
82771 int align;
82772
82773@@ -54,10 +54,14 @@ struct kmem_cache {
82774 unsigned long node_allocs;
82775 unsigned long node_frees;
82776 unsigned long node_overflow;
82777- atomic_t allochit;
82778- atomic_t allocmiss;
82779- atomic_t freehit;
82780- atomic_t freemiss;
82781+ atomic_unchecked_t allochit;
82782+ atomic_unchecked_t allocmiss;
82783+ atomic_unchecked_t freehit;
82784+ atomic_unchecked_t freemiss;
82785+#ifdef CONFIG_PAX_MEMORY_SANITIZE
82786+ atomic_unchecked_t sanitized;
82787+ atomic_unchecked_t not_sanitized;
82788+#endif
82789
82790 /*
82791 * If debugging is enabled, then the allocator can add additional
82792diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
82793index f56bfa9..8378a26 100644
82794--- a/include/linux/slub_def.h
82795+++ b/include/linux/slub_def.h
82796@@ -74,7 +74,7 @@ struct kmem_cache {
82797 struct kmem_cache_order_objects max;
82798 struct kmem_cache_order_objects min;
82799 gfp_t allocflags; /* gfp flags to use on each alloc */
82800- int refcount; /* Refcount for slab cache destroy */
82801+ atomic_t refcount; /* Refcount for slab cache destroy */
82802 void (*ctor)(void *);
82803 int inuse; /* Offset to metadata */
82804 int align; /* Alignment */
82805diff --git a/include/linux/smp.h b/include/linux/smp.h
82806index 6ae004e..2743532 100644
82807--- a/include/linux/smp.h
82808+++ b/include/linux/smp.h
82809@@ -180,7 +180,9 @@ static inline void kick_all_cpus_sync(void) { }
82810 #endif
82811
82812 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
82813+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
82814 #define put_cpu() preempt_enable()
82815+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
82816
82817 /*
82818 * Callback to arch code if there's nosmp or maxcpus=0 on the
82819diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
82820index 302ab80..3233276 100644
82821--- a/include/linux/sock_diag.h
82822+++ b/include/linux/sock_diag.h
82823@@ -11,7 +11,7 @@ struct sock;
82824 struct sock_diag_handler {
82825 __u8 family;
82826 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
82827-};
82828+} __do_const;
82829
82830 int sock_diag_register(const struct sock_diag_handler *h);
82831 void sock_diag_unregister(const struct sock_diag_handler *h);
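The __do_const annotation recurring through these headers is consumed by grsecurity's constify gcc plugin, which moves the tagged ops structures into read-only memory so their function pointers cannot be overwritten at runtime. Plain C const only approximates that, but a userspace sketch shows the intent:

#include <stdio.h>

struct diag_handler {
        unsigned char family;
        int (*dump)(const char *msg);
};

static int dump_msg(const char *msg) { return printf("%s\n", msg); }

/* lands in .rodata: scribbling over ->dump faults instead of silently
 * redirecting execution to attacker-controlled code */
static const struct diag_handler handler = { 2 /* AF_INET */, dump_msg };

int main(void)
{
        return handler.dump("dump called") < 0;
}

The matching __no_const typedefs that appear later (ctl_table_no_const, attribute_no_const, and so on) are the escape hatch for the few structures that genuinely must be built or patched at runtime.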
82832diff --git a/include/linux/sonet.h b/include/linux/sonet.h
82833index 680f9a3..f13aeb0 100644
82834--- a/include/linux/sonet.h
82835+++ b/include/linux/sonet.h
82836@@ -7,7 +7,7 @@
82837 #include <uapi/linux/sonet.h>
82838
82839 struct k_sonet_stats {
82840-#define __HANDLE_ITEM(i) atomic_t i
82841+#define __HANDLE_ITEM(i) atomic_unchecked_t i
82842 __SONET_ITEMS
82843 #undef __HANDLE_ITEM
82844 };
82845diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
82846index 07d8e53..dc934c9 100644
82847--- a/include/linux/sunrpc/addr.h
82848+++ b/include/linux/sunrpc/addr.h
82849@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
82850 {
82851 switch (sap->sa_family) {
82852 case AF_INET:
82853- return ntohs(((struct sockaddr_in *)sap)->sin_port);
82854+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
82855 case AF_INET6:
82856- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
82857+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
82858 }
82859 return 0;
82860 }
82861@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
82862 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
82863 const struct sockaddr *src)
82864 {
82865- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
82866+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
82867 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
82868
82869 dsin->sin_family = ssin->sin_family;
82870@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
82871 if (sa->sa_family != AF_INET6)
82872 return 0;
82873
82874- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
82875+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
82876 }
82877
82878 #endif /* _LINUX_SUNRPC_ADDR_H */
82879diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
82880index 8af2804..c7414ef 100644
82881--- a/include/linux/sunrpc/clnt.h
82882+++ b/include/linux/sunrpc/clnt.h
82883@@ -97,7 +97,7 @@ struct rpc_procinfo {
82884 unsigned int p_timer; /* Which RTT timer to use */
82885 u32 p_statidx; /* Which procedure to account */
82886 const char * p_name; /* name of procedure */
82887-};
82888+} __do_const;
82889
82890 #ifdef __KERNEL__
82891
82892diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
82893index 04e7632..2e2a8a3 100644
82894--- a/include/linux/sunrpc/svc.h
82895+++ b/include/linux/sunrpc/svc.h
82896@@ -412,7 +412,7 @@ struct svc_procedure {
82897 unsigned int pc_count; /* call count */
82898 unsigned int pc_cachetype; /* cache info (NFS) */
82899 unsigned int pc_xdrressize; /* maximum size of XDR reply */
82900-};
82901+} __do_const;
82902
82903 /*
82904 * Function prototypes.
82905diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
82906index 0b8e3e6..33e0a01 100644
82907--- a/include/linux/sunrpc/svc_rdma.h
82908+++ b/include/linux/sunrpc/svc_rdma.h
82909@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
82910 extern unsigned int svcrdma_max_requests;
82911 extern unsigned int svcrdma_max_req_size;
82912
82913-extern atomic_t rdma_stat_recv;
82914-extern atomic_t rdma_stat_read;
82915-extern atomic_t rdma_stat_write;
82916-extern atomic_t rdma_stat_sq_starve;
82917-extern atomic_t rdma_stat_rq_starve;
82918-extern atomic_t rdma_stat_rq_poll;
82919-extern atomic_t rdma_stat_rq_prod;
82920-extern atomic_t rdma_stat_sq_poll;
82921-extern atomic_t rdma_stat_sq_prod;
82922+extern atomic_unchecked_t rdma_stat_recv;
82923+extern atomic_unchecked_t rdma_stat_read;
82924+extern atomic_unchecked_t rdma_stat_write;
82925+extern atomic_unchecked_t rdma_stat_sq_starve;
82926+extern atomic_unchecked_t rdma_stat_rq_starve;
82927+extern atomic_unchecked_t rdma_stat_rq_poll;
82928+extern atomic_unchecked_t rdma_stat_rq_prod;
82929+extern atomic_unchecked_t rdma_stat_sq_poll;
82930+extern atomic_unchecked_t rdma_stat_sq_prod;
82931
82932 #define RPCRDMA_VERSION 1
82933
82934diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
82935index 8d71d65..f79586e 100644
82936--- a/include/linux/sunrpc/svcauth.h
82937+++ b/include/linux/sunrpc/svcauth.h
82938@@ -120,7 +120,7 @@ struct auth_ops {
82939 int (*release)(struct svc_rqst *rq);
82940 void (*domain_release)(struct auth_domain *);
82941 int (*set_client)(struct svc_rqst *rq);
82942-};
82943+} __do_const;
82944
82945 #define SVC_GARBAGE 1
82946 #define SVC_SYSERR 2
82947diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
82948index a5ffd32..0935dea 100644
82949--- a/include/linux/swiotlb.h
82950+++ b/include/linux/swiotlb.h
82951@@ -60,7 +60,8 @@ extern void
82952
82953 extern void
82954 swiotlb_free_coherent(struct device *hwdev, size_t size,
82955- void *vaddr, dma_addr_t dma_handle);
82956+ void *vaddr, dma_addr_t dma_handle,
82957+ struct dma_attrs *attrs);
82958
82959 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
82960 unsigned long offset, size_t size,
82961diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
82962index a747a77..9e14df7 100644
82963--- a/include/linux/syscalls.h
82964+++ b/include/linux/syscalls.h
82965@@ -98,8 +98,14 @@ struct sigaltstack;
82966 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
82967
82968 #define __SC_DECL(t, a) t a
82969+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
82970 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
82971-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
82972+#define __SC_LONG(t, a) __typeof( \
82973+ __builtin_choose_expr( \
82974+ sizeof(t) > sizeof(int), \
82975+ (t) 0, \
82976+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
82977+ )) a
82978 #define __SC_CAST(t, a) (t) a
82979 #define __SC_ARGS(t, a) a
82980 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
82981@@ -371,11 +377,11 @@ asmlinkage long sys_sync(void);
82982 asmlinkage long sys_fsync(unsigned int fd);
82983 asmlinkage long sys_fdatasync(unsigned int fd);
82984 asmlinkage long sys_bdflush(int func, long data);
82985-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
82986- char __user *type, unsigned long flags,
82987+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
82988+ const char __user *type, unsigned long flags,
82989 void __user *data);
82990-asmlinkage long sys_umount(char __user *name, int flags);
82991-asmlinkage long sys_oldumount(char __user *name);
82992+asmlinkage long sys_umount(const char __user *name, int flags);
82993+asmlinkage long sys_oldumount(const char __user *name);
82994 asmlinkage long sys_truncate(const char __user *path, long length);
82995 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
82996 asmlinkage long sys_stat(const char __user *filename,
82997@@ -587,7 +593,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
82998 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
82999 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
83000 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
83001- struct sockaddr __user *, int);
83002+ struct sockaddr __user *, int) __intentional_overflow(0);
83003 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
83004 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
83005 unsigned int vlen, unsigned flags);
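The __SC_LONG rewrite changes how the SYSCALL_DEFINE wrappers widen arguments narrower than long: unsigned types now travel as unsigned long rather than being sign-extended through a signed long. A userspace demonstration of the hazard that avoids:

#include <stdio.h>

int main(void)
{
        unsigned int len = 0xffffffffu;

        /* old scheme: any type <= int rides in a signed long */
        long as_signed = (long)(int)len;
        /* new scheme: unsigned types ride in an unsigned long */
        unsigned long as_unsigned = (unsigned long)len;

        printf("signed widening:   %ld\n", as_signed);    /* -1 */
        printf("unsigned widening: %lu\n", as_unsigned);  /* 4294967295 */
        return 0;
}

On 64-bit, a length that became -1 sails past a signed `len < limit` check and then turns into a huge size_t at the copy; the stricter widening shuts that class of bug down.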
83006diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
83007index 27b3b0b..e093dd9 100644
83008--- a/include/linux/syscore_ops.h
83009+++ b/include/linux/syscore_ops.h
83010@@ -16,7 +16,7 @@ struct syscore_ops {
83011 int (*suspend)(void);
83012 void (*resume)(void);
83013 void (*shutdown)(void);
83014-};
83015+} __do_const;
83016
83017 extern void register_syscore_ops(struct syscore_ops *ops);
83018 extern void unregister_syscore_ops(struct syscore_ops *ops);
83019diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
83020index 14a8ff2..fa95f3a 100644
83021--- a/include/linux/sysctl.h
83022+++ b/include/linux/sysctl.h
83023@@ -34,13 +34,13 @@ struct ctl_table_root;
83024 struct ctl_table_header;
83025 struct ctl_dir;
83026
83027-typedef struct ctl_table ctl_table;
83028-
83029 typedef int proc_handler (struct ctl_table *ctl, int write,
83030 void __user *buffer, size_t *lenp, loff_t *ppos);
83031
83032 extern int proc_dostring(struct ctl_table *, int,
83033 void __user *, size_t *, loff_t *);
83034+extern int proc_dostring_modpriv(struct ctl_table *, int,
83035+ void __user *, size_t *, loff_t *);
83036 extern int proc_dointvec(struct ctl_table *, int,
83037 void __user *, size_t *, loff_t *);
83038 extern int proc_dointvec_minmax(struct ctl_table *, int,
83039@@ -115,7 +115,9 @@ struct ctl_table
83040 struct ctl_table_poll *poll;
83041 void *extra1;
83042 void *extra2;
83043-};
83044+} __do_const __randomize_layout;
83045+typedef struct ctl_table __no_const ctl_table_no_const;
83046+typedef struct ctl_table ctl_table;
83047
83048 struct ctl_node {
83049 struct rb_node node;
83050diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
83051index 30b2ebe..37412ef 100644
83052--- a/include/linux/sysfs.h
83053+++ b/include/linux/sysfs.h
83054@@ -34,7 +34,8 @@ struct attribute {
83055 struct lock_class_key *key;
83056 struct lock_class_key skey;
83057 #endif
83058-};
83059+} __do_const;
83060+typedef struct attribute __no_const attribute_no_const;
83061
83062 /**
83063 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
83064@@ -63,7 +64,8 @@ struct attribute_group {
83065 struct attribute *, int);
83066 struct attribute **attrs;
83067 struct bin_attribute **bin_attrs;
83068-};
83069+} __do_const;
83070+typedef struct attribute_group __no_const attribute_group_no_const;
83071
83072 /**
83073 * Use these macros to make defining attributes easier. See include/linux/device.h
83074@@ -127,7 +129,8 @@ struct bin_attribute {
83075 char *, loff_t, size_t);
83076 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
83077 struct vm_area_struct *vma);
83078-};
83079+} __do_const;
83080+typedef struct bin_attribute __no_const bin_attribute_no_const;
83081
83082 /**
83083 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
83084diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
83085index 387fa7d..3fcde6b 100644
83086--- a/include/linux/sysrq.h
83087+++ b/include/linux/sysrq.h
83088@@ -16,6 +16,7 @@
83089
83090 #include <linux/errno.h>
83091 #include <linux/types.h>
83092+#include <linux/compiler.h>
83093
83094 /* Possible values of bitmask for enabling sysrq functions */
83095 /* 0x0001 is reserved for enable everything */
83096@@ -33,7 +34,7 @@ struct sysrq_key_op {
83097 char *help_msg;
83098 char *action_msg;
83099 int enable_mask;
83100-};
83101+} __do_const;
83102
83103 #ifdef CONFIG_MAGIC_SYSRQ
83104
83105diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
83106index a629e4b..3fea3d9 100644
83107--- a/include/linux/thread_info.h
83108+++ b/include/linux/thread_info.h
83109@@ -159,6 +159,13 @@ static inline bool test_and_clear_restore_sigmask(void)
83110 #error "no set_restore_sigmask() provided and default one won't work"
83111 #endif
83112
83113+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
83114+
83115+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
83116+{
83117+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
83118+}
83119+
83120 #endif /* __KERNEL__ */
83121
83122 #endif /* _LINUX_THREAD_INFO_H */
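The check_object_size helper added above is the entry point of PaX's USERCOPY checking: architectures call it on every copy_to_user/copy_from_user so the kernel object's bounds can be validated against the copy length, and __builtin_constant_p lets compile-time-constant sizes take a cheaper path. A hypothetical userspace stand-in — the real work happens in __check_object_size, which this stub only mimics:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void __check_object_size(const void *ptr, unsigned long n,
                                bool to_user, bool const_size)
{
        (void)ptr;      /* the kernel walks slab/stack metadata for ptr..ptr+n */
        printf("%lu bytes %s user, %s size\n", n,
               to_user ? "to" : "from",
               const_size ? "compile-time" : "runtime");
}

static inline void check_object_size(const void *ptr, unsigned long n,
                                     bool to_user)
{
        __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
}

int main(void)
{
        char buf[64];
        volatile size_t runtime_len = 5;        /* defeats constant folding */

        /* with -O2 this call inlines and n is seen as constant */
        check_object_size(buf, sizeof(buf), true);
        check_object_size(buf, runtime_len, false);
        return 0;
}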
83123diff --git a/include/linux/tty.h b/include/linux/tty.h
83124index b90b5c2..e23a512 100644
83125--- a/include/linux/tty.h
83126+++ b/include/linux/tty.h
83127@@ -202,7 +202,7 @@ struct tty_port {
83128 const struct tty_port_operations *ops; /* Port operations */
83129 spinlock_t lock; /* Lock protecting tty field */
83130 int blocked_open; /* Waiting to open */
83131- int count; /* Usage count */
83132+ atomic_t count; /* Usage count */
83133 wait_queue_head_t open_wait; /* Open waiters */
83134 wait_queue_head_t close_wait; /* Close waiters */
83135 wait_queue_head_t delta_msr_wait; /* Modem status change */
83136@@ -284,7 +284,7 @@ struct tty_struct {
83137 /* If the tty has a pending do_SAK, queue it here - akpm */
83138 struct work_struct SAK_work;
83139 struct tty_port *port;
83140-};
83141+} __randomize_layout;
83142
83143 /* Each of a tty's open files has private_data pointing to tty_file_private */
83144 struct tty_file_private {
83145@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
83146 struct tty_struct *tty, struct file *filp);
83147 static inline int tty_port_users(struct tty_port *port)
83148 {
83149- return port->count + port->blocked_open;
83150+ return atomic_read(&port->count) + port->blocked_open;
83151 }
83152
83153 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
83154diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
83155index 756a609..89db85e 100644
83156--- a/include/linux/tty_driver.h
83157+++ b/include/linux/tty_driver.h
83158@@ -285,7 +285,7 @@ struct tty_operations {
83159 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
83160 #endif
83161 const struct file_operations *proc_fops;
83162-};
83163+} __do_const __randomize_layout;
83164
83165 struct tty_driver {
83166 int magic; /* magic number for this structure */
83167@@ -319,7 +319,7 @@ struct tty_driver {
83168
83169 const struct tty_operations *ops;
83170 struct list_head tty_drivers;
83171-};
83172+} __randomize_layout;
83173
83174 extern struct list_head tty_drivers;
83175
83176diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
83177index b8347c2..85d8b0f 100644
83178--- a/include/linux/tty_ldisc.h
83179+++ b/include/linux/tty_ldisc.h
83180@@ -213,7 +213,7 @@ struct tty_ldisc_ops {
83181
83182 struct module *owner;
83183
83184- int refcount;
83185+ atomic_t refcount;
83186 };
83187
83188 struct tty_ldisc {
83189diff --git a/include/linux/types.h b/include/linux/types.h
83190index 4d118ba..c3ee9bf 100644
83191--- a/include/linux/types.h
83192+++ b/include/linux/types.h
83193@@ -176,10 +176,26 @@ typedef struct {
83194 int counter;
83195 } atomic_t;
83196
83197+#ifdef CONFIG_PAX_REFCOUNT
83198+typedef struct {
83199+ int counter;
83200+} atomic_unchecked_t;
83201+#else
83202+typedef atomic_t atomic_unchecked_t;
83203+#endif
83204+
83205 #ifdef CONFIG_64BIT
83206 typedef struct {
83207 long counter;
83208 } atomic64_t;
83209+
83210+#ifdef CONFIG_PAX_REFCOUNT
83211+typedef struct {
83212+ long counter;
83213+} atomic64_unchecked_t;
83214+#else
83215+typedef atomic64_t atomic64_unchecked_t;
83216+#endif
83217 #endif
83218
83219 struct list_head {
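The atomic_unchecked_t type introduced above is the companion to PAX_REFCOUNT: with that option on, ordinary atomic_t operations gain overflow detection, while counters that are allowed to wrap — the statistics fields converted to _unchecked throughout this section — keep plain two's-complement behavior. A rough single-threaded userspace analogy, assuming GCC/Clang's __builtin_add_overflow; the real implementation is per-arch assembly, and the kernel kills the offending task rather than calling abort():

#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
        int next;

        if (__builtin_add_overflow(v->counter, 1, &next)) {
                fprintf(stderr, "refcount overflow caught\n");
                abort();
        }
        v->counter = next;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* wrap through unsigned arithmetic, which C defines */
        v->counter = (int)((unsigned int)v->counter + 1u);
}

int main(void)
{
        atomic_unchecked_t stat = { 0x7fffffff };
        atomic_t ref = { 0x7fffffff };

        atomic_inc_unchecked(&stat);            /* wraps silently */
        printf("stat wrapped to %d\n", stat.counter);
        atomic_inc(&ref);                       /* detected: aborts */
        return 0;
}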
83220diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
83221index ecd3319..8a36ded 100644
83222--- a/include/linux/uaccess.h
83223+++ b/include/linux/uaccess.h
83224@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
83225 long ret; \
83226 mm_segment_t old_fs = get_fs(); \
83227 \
83228- set_fs(KERNEL_DS); \
83229 pagefault_disable(); \
83230- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
83231- pagefault_enable(); \
83232+ set_fs(KERNEL_DS); \
83233+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
83234 set_fs(old_fs); \
83235+ pagefault_enable(); \
83236 ret; \
83237 })
83238
83239diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
83240index 2d1f9b6..d7a9fce 100644
83241--- a/include/linux/uidgid.h
83242+++ b/include/linux/uidgid.h
83243@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
83244
83245 #endif /* CONFIG_USER_NS */
83246
83247+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
83248+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
83249+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
83250+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
83251+
83252 #endif /* _LINUX_UIDGID_H */
83253diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
83254index 99c1b4d..562e6f3 100644
83255--- a/include/linux/unaligned/access_ok.h
83256+++ b/include/linux/unaligned/access_ok.h
83257@@ -4,34 +4,34 @@
83258 #include <linux/kernel.h>
83259 #include <asm/byteorder.h>
83260
83261-static inline u16 get_unaligned_le16(const void *p)
83262+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
83263 {
83264- return le16_to_cpup((__le16 *)p);
83265+ return le16_to_cpup((const __le16 *)p);
83266 }
83267
83268-static inline u32 get_unaligned_le32(const void *p)
83269+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
83270 {
83271- return le32_to_cpup((__le32 *)p);
83272+ return le32_to_cpup((const __le32 *)p);
83273 }
83274
83275-static inline u64 get_unaligned_le64(const void *p)
83276+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
83277 {
83278- return le64_to_cpup((__le64 *)p);
83279+ return le64_to_cpup((const __le64 *)p);
83280 }
83281
83282-static inline u16 get_unaligned_be16(const void *p)
83283+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
83284 {
83285- return be16_to_cpup((__be16 *)p);
83286+ return be16_to_cpup((const __be16 *)p);
83287 }
83288
83289-static inline u32 get_unaligned_be32(const void *p)
83290+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
83291 {
83292- return be32_to_cpup((__be32 *)p);
83293+ return be32_to_cpup((const __be32 *)p);
83294 }
83295
83296-static inline u64 get_unaligned_be64(const void *p)
83297+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
83298 {
83299- return be64_to_cpup((__be64 *)p);
83300+ return be64_to_cpup((const __be64 *)p);
83301 }
83302
83303 static inline void put_unaligned_le16(u16 val, void *p)
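The __intentional_overflow(-1) markings above tell the size_overflow gcc plugin that any wraparound inside these accessors is by design, so it should not flag them; the added const casts are just type hygiene. A userspace rendering of the little-endian 16-bit accessor, assuming GCC/Clang's byte-order macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_unaligned_le16(const void *p)
{
        uint16_t v;

        memcpy(&v, p, sizeof(v));       /* safe unaligned load */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        v = (uint16_t)(v << 8 | v >> 8);
#endif
        return v;
}

int main(void)
{
        unsigned char buf[] = { 0x34, 0x12 };

        printf("0x%04x\n", get_unaligned_le16(buf));    /* prints 0x1234 */
        return 0;
}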
83304diff --git a/include/linux/usb.h b/include/linux/usb.h
83305index 7f6eb85..656e806 100644
83306--- a/include/linux/usb.h
83307+++ b/include/linux/usb.h
83308@@ -563,7 +563,7 @@ struct usb_device {
83309 int maxchild;
83310
83311 u32 quirks;
83312- atomic_t urbnum;
83313+ atomic_unchecked_t urbnum;
83314
83315 unsigned long active_duration;
83316
83317@@ -1642,7 +1642,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
83318
83319 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
83320 __u8 request, __u8 requesttype, __u16 value, __u16 index,
83321- void *data, __u16 size, int timeout);
83322+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
83323 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
83324 void *data, int len, int *actual_length, int timeout);
83325 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
83326diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
83327index e452ba6..78f8e80 100644
83328--- a/include/linux/usb/renesas_usbhs.h
83329+++ b/include/linux/usb/renesas_usbhs.h
83330@@ -39,7 +39,7 @@ enum {
83331 */
83332 struct renesas_usbhs_driver_callback {
83333 int (*notify_hotplug)(struct platform_device *pdev);
83334-};
83335+} __no_const;
83336
83337 /*
83338 * callback functions for platform
83339diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
83340index 4836ba3..603f6ee 100644
83341--- a/include/linux/user_namespace.h
83342+++ b/include/linux/user_namespace.h
83343@@ -33,7 +33,7 @@ struct user_namespace {
83344 struct key *persistent_keyring_register;
83345 struct rw_semaphore persistent_keyring_register_sem;
83346 #endif
83347-};
83348+} __randomize_layout;
83349
83350 extern struct user_namespace init_user_ns;
83351
83352diff --git a/include/linux/utsname.h b/include/linux/utsname.h
83353index 239e277..22a5cf5 100644
83354--- a/include/linux/utsname.h
83355+++ b/include/linux/utsname.h
83356@@ -24,7 +24,7 @@ struct uts_namespace {
83357 struct new_utsname name;
83358 struct user_namespace *user_ns;
83359 unsigned int proc_inum;
83360-};
83361+} __randomize_layout;
83362 extern struct uts_namespace init_uts_ns;
83363
83364 #ifdef CONFIG_UTS_NS
83365diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
83366index 6f8fbcf..4efc177 100644
83367--- a/include/linux/vermagic.h
83368+++ b/include/linux/vermagic.h
83369@@ -25,9 +25,42 @@
83370 #define MODULE_ARCH_VERMAGIC ""
83371 #endif
83372
83373+#ifdef CONFIG_PAX_REFCOUNT
83374+#define MODULE_PAX_REFCOUNT "REFCOUNT "
83375+#else
83376+#define MODULE_PAX_REFCOUNT ""
83377+#endif
83378+
83379+#ifdef CONSTIFY_PLUGIN
83380+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
83381+#else
83382+#define MODULE_CONSTIFY_PLUGIN ""
83383+#endif
83384+
83385+#ifdef STACKLEAK_PLUGIN
83386+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
83387+#else
83388+#define MODULE_STACKLEAK_PLUGIN ""
83389+#endif
83390+
83391+#ifdef RANDSTRUCT_PLUGIN
83392+#include <generated/randomize_layout_hash.h>
83393+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
83394+#else
83395+#define MODULE_RANDSTRUCT_PLUGIN
83396+#endif
83397+
83398+#ifdef CONFIG_GRKERNSEC
83399+#define MODULE_GRSEC "GRSEC "
83400+#else
83401+#define MODULE_GRSEC ""
83402+#endif
83403+
83404 #define VERMAGIC_STRING \
83405 UTS_RELEASE " " \
83406 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
83407 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
83408- MODULE_ARCH_VERMAGIC
83409+ MODULE_ARCH_VERMAGIC \
83410+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
83411+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
83412
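The VERMAGIC_STRING extension above embeds one token per active hardening feature into every module, so modprobe refuses modules built without the matching plugin configuration. A userspace sketch of the resulting string under one assumed configuration — the release string and chosen flags are invented for illustration, and the SMP/PREEMPT/ARCH tokens are omitted:

#include <stdio.h>

#define UTS_RELEASE "3.14.6-grsec"              /* invented for the demo */
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
#define MODULE_STACKLEAK_PLUGIN ""
#define MODULE_GRSEC "GRSEC "
#define MODULE_RANDSTRUCT_PLUGIN ""

#define VERMAGIC_STRING \
        UTS_RELEASE " " \
        MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
        MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN

int main(void)
{
        /* "3.14.6-grsec REFCOUNT CONSTIFY_PLUGIN GRSEC " */
        puts(VERMAGIC_STRING);
        return 0;
}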
83413diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
83414index 502073a..a7de024 100644
83415--- a/include/linux/vga_switcheroo.h
83416+++ b/include/linux/vga_switcheroo.h
83417@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
83418
83419 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
83420
83421-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
83422-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
83423+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
83424+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
83425 #else
83426
83427 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
83428@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
83429
83430 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
83431
83432-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
83433-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
83434+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
83435+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
83436
83437 #endif
83438 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
83439diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
83440index 4b8a891..05f2361 100644
83441--- a/include/linux/vmalloc.h
83442+++ b/include/linux/vmalloc.h
83443@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
83444 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
83445 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
83446 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
83447+
83448+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83449+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
83450+#endif
83451+
83452 /* bits [20..32] reserved for arch specific ioremap internals */
83453
83454 /*
83455@@ -72,6 +77,7 @@ extern void *vzalloc_node(unsigned long size, int node);
83456 extern void *vmalloc_exec(unsigned long size);
83457 extern void *vmalloc_32(unsigned long size);
83458 extern void *vmalloc_32_user(unsigned long size);
83459+extern void *vmalloc_stack(int node);
83460 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
83461 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
83462 unsigned long start, unsigned long end, gfp_t gfp_mask,
83463@@ -142,7 +148,7 @@ extern void free_vm_area(struct vm_struct *area);
83464
83465 /* for /dev/kmem */
83466 extern long vread(char *buf, char *addr, unsigned long count);
83467-extern long vwrite(char *buf, char *addr, unsigned long count);
83468+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
83469
83470 /*
83471 * Internals. Dont't use..
83472diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
83473index 67ce70c..d540954 100644
83474--- a/include/linux/vmstat.h
83475+++ b/include/linux/vmstat.h
83476@@ -98,18 +98,18 @@ static inline void vm_events_fold_cpu(int cpu)
83477 /*
83478 * Zone based page accounting with per cpu differentials.
83479 */
83480-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83481+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83482
83483 static inline void zone_page_state_add(long x, struct zone *zone,
83484 enum zone_stat_item item)
83485 {
83486- atomic_long_add(x, &zone->vm_stat[item]);
83487- atomic_long_add(x, &vm_stat[item]);
83488+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
83489+ atomic_long_add_unchecked(x, &vm_stat[item]);
83490 }
83491
83492-static inline unsigned long global_page_state(enum zone_stat_item item)
83493+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
83494 {
83495- long x = atomic_long_read(&vm_stat[item]);
83496+ long x = atomic_long_read_unchecked(&vm_stat[item]);
83497 #ifdef CONFIG_SMP
83498 if (x < 0)
83499 x = 0;
83500@@ -117,10 +117,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
83501 return x;
83502 }
83503
83504-static inline unsigned long zone_page_state(struct zone *zone,
83505+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
83506 enum zone_stat_item item)
83507 {
83508- long x = atomic_long_read(&zone->vm_stat[item]);
83509+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
83510 #ifdef CONFIG_SMP
83511 if (x < 0)
83512 x = 0;
83513@@ -137,7 +137,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
83514 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
83515 enum zone_stat_item item)
83516 {
83517- long x = atomic_long_read(&zone->vm_stat[item]);
83518+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
83519
83520 #ifdef CONFIG_SMP
83521 int cpu;
83522@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
83523
83524 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
83525 {
83526- atomic_long_inc(&zone->vm_stat[item]);
83527- atomic_long_inc(&vm_stat[item]);
83528+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
83529+ atomic_long_inc_unchecked(&vm_stat[item]);
83530 }
83531
83532 static inline void __inc_zone_page_state(struct page *page,
83533@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
83534
83535 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
83536 {
83537- atomic_long_dec(&zone->vm_stat[item]);
83538- atomic_long_dec(&vm_stat[item]);
83539+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
83540+ atomic_long_dec_unchecked(&vm_stat[item]);
83541 }
83542
83543 static inline void __dec_zone_page_state(struct page *page,
83544diff --git a/include/linux/xattr.h b/include/linux/xattr.h
83545index 91b0a68..0e9adf6 100644
83546--- a/include/linux/xattr.h
83547+++ b/include/linux/xattr.h
83548@@ -28,7 +28,7 @@ struct xattr_handler {
83549 size_t size, int handler_flags);
83550 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
83551 size_t size, int flags, int handler_flags);
83552-};
83553+} __do_const;
83554
83555 struct xattr {
83556 const char *name;
83557@@ -37,6 +37,9 @@ struct xattr {
83558 };
83559
83560 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
83561+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
83562+ssize_t pax_getxattr(struct dentry *, void *, size_t);
83563+#endif
83564 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
83565 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
83566 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
83567diff --git a/include/linux/zlib.h b/include/linux/zlib.h
83568index 9c5a6b4..09c9438 100644
83569--- a/include/linux/zlib.h
83570+++ b/include/linux/zlib.h
83571@@ -31,6 +31,7 @@
83572 #define _ZLIB_H
83573
83574 #include <linux/zconf.h>
83575+#include <linux/compiler.h>
83576
83577 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
83578 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
83579@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
83580
83581 /* basic functions */
83582
83583-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
83584+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
83585 /*
83586 Returns the number of bytes that needs to be allocated for a per-
83587 stream workspace with the specified parameters. A pointer to this
83588diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
83589index c768c9f..bdcaa5a 100644
83590--- a/include/media/v4l2-dev.h
83591+++ b/include/media/v4l2-dev.h
83592@@ -76,7 +76,7 @@ struct v4l2_file_operations {
83593 int (*mmap) (struct file *, struct vm_area_struct *);
83594 int (*open) (struct file *);
83595 int (*release) (struct file *);
83596-};
83597+} __do_const;
83598
83599 /*
83600 * Newer version of video_device, handled by videodev2.c
83601diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
83602index c9b1593..a572459 100644
83603--- a/include/media/v4l2-device.h
83604+++ b/include/media/v4l2-device.h
83605@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
83606 this function returns 0. If the name ends with a digit (e.g. cx18),
83607 then the name will be set to cx18-0 since cx180 looks really odd. */
83608 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
83609- atomic_t *instance);
83610+ atomic_unchecked_t *instance);
83611
83612 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
83613 Since the parent disappears this ensures that v4l2_dev doesn't have an
83614diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
83615index 9a36d92..0aafe2a 100644
83616--- a/include/net/9p/transport.h
83617+++ b/include/net/9p/transport.h
83618@@ -60,7 +60,7 @@ struct p9_trans_module {
83619 int (*cancel) (struct p9_client *, struct p9_req_t *req);
83620 int (*zc_request)(struct p9_client *, struct p9_req_t *,
83621 char *, char *, int , int, int, int);
83622-};
83623+} __do_const;
83624
83625 void v9fs_register_trans(struct p9_trans_module *m);
83626 void v9fs_unregister_trans(struct p9_trans_module *m);
83627diff --git a/include/net/af_unix.h b/include/net/af_unix.h
83628index a175ba4..196eb82 100644
83629--- a/include/net/af_unix.h
83630+++ b/include/net/af_unix.h
83631@@ -36,7 +36,7 @@ struct unix_skb_parms {
83632 u32 secid; /* Security ID */
83633 #endif
83634 u32 consumed;
83635-};
83636+} __randomize_layout;
83637
83638 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
83639 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
83640diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
83641index dbc4a89..4a59b5d 100644
83642--- a/include/net/bluetooth/l2cap.h
83643+++ b/include/net/bluetooth/l2cap.h
83644@@ -600,7 +600,7 @@ struct l2cap_ops {
83645 long (*get_sndtimeo) (struct l2cap_chan *chan);
83646 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
83647 unsigned long len, int nb);
83648-};
83649+} __do_const;
83650
83651 struct l2cap_conn {
83652 struct hci_conn *hcon;
83653diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
83654index f2ae33d..c457cf0 100644
83655--- a/include/net/caif/cfctrl.h
83656+++ b/include/net/caif/cfctrl.h
83657@@ -52,7 +52,7 @@ struct cfctrl_rsp {
83658 void (*radioset_rsp)(void);
83659 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
83660 struct cflayer *client_layer);
83661-};
83662+} __no_const;
83663
83664 /* Link Setup Parameters for CAIF-Links. */
83665 struct cfctrl_link_param {
83666@@ -101,8 +101,8 @@ struct cfctrl_request_info {
83667 struct cfctrl {
83668 struct cfsrvl serv;
83669 struct cfctrl_rsp res;
83670- atomic_t req_seq_no;
83671- atomic_t rsp_seq_no;
83672+ atomic_unchecked_t req_seq_no;
83673+ atomic_unchecked_t rsp_seq_no;
83674 struct list_head list;
83675 /* Protects from simultaneous access to first_req list */
83676 spinlock_t info_list_lock;
83677diff --git a/include/net/flow.h b/include/net/flow.h
83678index d23e7fa..e188307 100644
83679--- a/include/net/flow.h
83680+++ b/include/net/flow.h
83681@@ -221,6 +221,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
83682
83683 void flow_cache_flush(void);
83684 void flow_cache_flush_deferred(void);
83685-extern atomic_t flow_cache_genid;
83686+extern atomic_unchecked_t flow_cache_genid;
83687
83688 #endif
83689diff --git a/include/net/genetlink.h b/include/net/genetlink.h
83690index 93695f0..766d71c 100644
83691--- a/include/net/genetlink.h
83692+++ b/include/net/genetlink.h
83693@@ -120,7 +120,7 @@ struct genl_ops {
83694 u8 cmd;
83695 u8 internal_flags;
83696 u8 flags;
83697-};
83698+} __do_const;
83699
83700 int __genl_register_family(struct genl_family *family);
83701
83702diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
83703index 734d9b5..48a9a4b 100644
83704--- a/include/net/gro_cells.h
83705+++ b/include/net/gro_cells.h
83706@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
83707 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
83708
83709 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
83710- atomic_long_inc(&dev->rx_dropped);
83711+ atomic_long_inc_unchecked(&dev->rx_dropped);
83712 kfree_skb(skb);
83713 return;
83714 }
83715diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
83716index c55aeed..b3393f4 100644
83717--- a/include/net/inet_connection_sock.h
83718+++ b/include/net/inet_connection_sock.h
83719@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
83720 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
83721 int (*bind_conflict)(const struct sock *sk,
83722 const struct inet_bind_bucket *tb, bool relax);
83723-};
83724+} __do_const;
83725
83726 /** inet_connection_sock - INET connection oriented sock
83727 *
83728diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
83729index 6efe73c..1a44af7 100644
83730--- a/include/net/inetpeer.h
83731+++ b/include/net/inetpeer.h
83732@@ -47,8 +47,8 @@ struct inet_peer {
83733 */
83734 union {
83735 struct {
83736- atomic_t rid; /* Frag reception counter */
83737- atomic_t ip_id_count; /* IP ID for the next packet */
83738+ atomic_unchecked_t rid; /* Frag reception counter */
83739+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
83740 };
83741 struct rcu_head rcu;
83742 struct inet_peer *gc_next;
83743@@ -177,16 +177,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
83744 /* can be called with or without local BH being disabled */
83745 static inline int inet_getid(struct inet_peer *p, int more)
83746 {
83747- int old, new;
83748 more++;
83749 inet_peer_refcheck(p);
83750- do {
83751- old = atomic_read(&p->ip_id_count);
83752- new = old + more;
83753- if (!new)
83754- new = 1;
83755- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
83756- return new;
83757+ return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
83758 }
83759
83760 #endif /* _NET_INETPEER_H */
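The inet_getid change above replaces a cmpxchg retry loop with a single wrapping add: IP IDs only need to differ between packets, so the old loop's avoid-zero refinement is dropped and the counter is marked unchecked so deliberate wraparound does not trip REFCOUNT. A sketch with C11 atomics standing in for atomic_add_return_unchecked — fetch_add returns the old value, matching the `- more` in the hunk:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ip_id_count;

static int inet_getid(int more)
{
        more++;
        /* lock-free wrapping add; the old value is this packet's ID base */
        return atomic_fetch_add(&ip_id_count, more);
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("id %d\n", inet_getid(0));
        return 0;
}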
83761diff --git a/include/net/ip.h b/include/net/ip.h
83762index 23be0fd..0cb3e2c 100644
83763--- a/include/net/ip.h
83764+++ b/include/net/ip.h
83765@@ -214,7 +214,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
83766
83767 void inet_get_local_port_range(struct net *net, int *low, int *high);
83768
83769-extern unsigned long *sysctl_local_reserved_ports;
83770+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
83771 static inline int inet_is_reserved_local_port(int port)
83772 {
83773 return test_bit(port, sysctl_local_reserved_ports);
83774diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
83775index 9922093..a1755d6 100644
83776--- a/include/net/ip_fib.h
83777+++ b/include/net/ip_fib.h
83778@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
83779
83780 #define FIB_RES_SADDR(net, res) \
83781 ((FIB_RES_NH(res).nh_saddr_genid == \
83782- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
83783+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
83784 FIB_RES_NH(res).nh_saddr : \
83785 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
83786 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
83787diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
83788index 5679d92..2e7a690 100644
83789--- a/include/net/ip_vs.h
83790+++ b/include/net/ip_vs.h
83791@@ -558,7 +558,7 @@ struct ip_vs_conn {
83792 struct ip_vs_conn *control; /* Master control connection */
83793 atomic_t n_control; /* Number of controlled ones */
83794 struct ip_vs_dest *dest; /* real server */
83795- atomic_t in_pkts; /* incoming packet counter */
83796+ atomic_unchecked_t in_pkts; /* incoming packet counter */
83797
83798 /* packet transmitter for different forwarding methods. If it
83799 mangles the packet, it must return NF_DROP or better NF_STOLEN,
83800@@ -705,7 +705,7 @@ struct ip_vs_dest {
83801 __be16 port; /* port number of the server */
83802 union nf_inet_addr addr; /* IP address of the server */
83803 volatile unsigned int flags; /* dest status flags */
83804- atomic_t conn_flags; /* flags to copy to conn */
83805+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
83806 atomic_t weight; /* server weight */
83807
83808 atomic_t refcnt; /* reference counter */
83809@@ -960,11 +960,11 @@ struct netns_ipvs {
83810 /* ip_vs_lblc */
83811 int sysctl_lblc_expiration;
83812 struct ctl_table_header *lblc_ctl_header;
83813- struct ctl_table *lblc_ctl_table;
83814+ ctl_table_no_const *lblc_ctl_table;
83815 /* ip_vs_lblcr */
83816 int sysctl_lblcr_expiration;
83817 struct ctl_table_header *lblcr_ctl_header;
83818- struct ctl_table *lblcr_ctl_table;
83819+ ctl_table_no_const *lblcr_ctl_table;
83820 /* ip_vs_est */
83821 struct list_head est_list; /* estimator list */
83822 spinlock_t est_lock;
83823diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
83824index 8d4f588..2e37ad2 100644
83825--- a/include/net/irda/ircomm_tty.h
83826+++ b/include/net/irda/ircomm_tty.h
83827@@ -33,6 +33,7 @@
83828 #include <linux/termios.h>
83829 #include <linux/timer.h>
83830 #include <linux/tty.h> /* struct tty_struct */
83831+#include <asm/local.h>
83832
83833 #include <net/irda/irias_object.h>
83834 #include <net/irda/ircomm_core.h>
83835diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
83836index 714cc9a..ea05f3e 100644
83837--- a/include/net/iucv/af_iucv.h
83838+++ b/include/net/iucv/af_iucv.h
83839@@ -149,7 +149,7 @@ struct iucv_skb_cb {
83840 struct iucv_sock_list {
83841 struct hlist_head head;
83842 rwlock_t lock;
83843- atomic_t autobind_name;
83844+ atomic_unchecked_t autobind_name;
83845 };
83846
83847 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
83848diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
83849index f3be818..bf46196 100644
83850--- a/include/net/llc_c_ac.h
83851+++ b/include/net/llc_c_ac.h
83852@@ -87,7 +87,7 @@
83853 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
83854 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
83855
83856-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
83857+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
83858
83859 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
83860 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
83861diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
83862index 3948cf1..83b28c4 100644
83863--- a/include/net/llc_c_ev.h
83864+++ b/include/net/llc_c_ev.h
83865@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
83866 return (struct llc_conn_state_ev *)skb->cb;
83867 }
83868
83869-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
83870-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
83871+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
83872+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
83873
83874 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
83875 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
83876diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
83877index 0e79cfb..f46db31 100644
83878--- a/include/net/llc_c_st.h
83879+++ b/include/net/llc_c_st.h
83880@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
83881 u8 next_state;
83882 llc_conn_ev_qfyr_t *ev_qualifiers;
83883 llc_conn_action_t *ev_actions;
83884-};
83885+} __do_const;
83886
83887 struct llc_conn_state {
83888 u8 current_state;
83889diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
83890index a61b98c..aade1eb 100644
83891--- a/include/net/llc_s_ac.h
83892+++ b/include/net/llc_s_ac.h
83893@@ -23,7 +23,7 @@
83894 #define SAP_ACT_TEST_IND 9
83895
83896 /* All action functions must look like this */
83897-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
83898+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
83899
83900 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
83901 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
83902diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
83903index 567c681..cd73ac02 100644
83904--- a/include/net/llc_s_st.h
83905+++ b/include/net/llc_s_st.h
83906@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
83907 llc_sap_ev_t ev;
83908 u8 next_state;
83909 llc_sap_action_t *ev_actions;
83910-};
83911+} __do_const;
83912
83913 struct llc_sap_state {
83914 u8 curr_state;
83915diff --git a/include/net/mac80211.h b/include/net/mac80211.h
83916index f4ab2fb..71a85ba 100644
83917--- a/include/net/mac80211.h
83918+++ b/include/net/mac80211.h
83919@@ -4476,7 +4476,7 @@ struct rate_control_ops {
83920 void (*add_sta_debugfs)(void *priv, void *priv_sta,
83921 struct dentry *dir);
83922 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
83923-};
83924+} __do_const;
83925
83926 static inline int rate_supported(struct ieee80211_sta *sta,
83927 enum ieee80211_band band,
83928diff --git a/include/net/neighbour.h b/include/net/neighbour.h
83929index 7277caf..fd095bc 100644
83930--- a/include/net/neighbour.h
83931+++ b/include/net/neighbour.h
83932@@ -163,7 +163,7 @@ struct neigh_ops {
83933 void (*error_report)(struct neighbour *, struct sk_buff *);
83934 int (*output)(struct neighbour *, struct sk_buff *);
83935 int (*connected_output)(struct neighbour *, struct sk_buff *);
83936-};
83937+} __do_const;
83938
83939 struct pneigh_entry {
83940 struct pneigh_entry *next;
83941@@ -203,7 +203,6 @@ struct neigh_table {
83942 void (*proxy_redo)(struct sk_buff *skb);
83943 char *id;
83944 struct neigh_parms parms;
83945- /* HACK. gc_* should follow parms without a gap! */
83946 int gc_interval;
83947 int gc_thresh1;
83948 int gc_thresh2;
83949@@ -218,7 +217,7 @@ struct neigh_table {
83950 struct neigh_statistics __percpu *stats;
83951 struct neigh_hash_table __rcu *nht;
83952 struct pneigh_entry **phash_buckets;
83953-};
83954+} __randomize_layout;
83955
83956 static inline int neigh_parms_family(struct neigh_parms *p)
83957 {
83958diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
83959index 991dcd9..ab58d00 100644
83960--- a/include/net/net_namespace.h
83961+++ b/include/net/net_namespace.h
83962@@ -124,8 +124,8 @@ struct net {
83963 struct netns_ipvs *ipvs;
83964 #endif
83965 struct sock *diag_nlsk;
83966- atomic_t fnhe_genid;
83967-};
83968+ atomic_unchecked_t fnhe_genid;
83969+} __randomize_layout;
83970
83971 /*
83972 * ifindex generation is per-net namespace, and loopback is
83973@@ -289,7 +289,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
83974 #define __net_init __init
83975 #define __net_exit __exit_refok
83976 #define __net_initdata __initdata
83977+#ifdef CONSTIFY_PLUGIN
83978 #define __net_initconst __initconst
83979+#else
83980+#define __net_initconst __initdata
83981+#endif
83982 #endif
83983
83984 struct pernet_operations {
83985@@ -299,7 +303,7 @@ struct pernet_operations {
83986 void (*exit_batch)(struct list_head *net_exit_list);
83987 int *id;
83988 size_t size;
83989-};
83990+} __do_const;
83991
83992 /*
83993 * Use these carefully. If you implement a network device and it
83994@@ -347,23 +351,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
83995
83996 static inline int rt_genid_ipv4(struct net *net)
83997 {
83998- return atomic_read(&net->ipv4.rt_genid);
83999+ return atomic_read_unchecked(&net->ipv4.rt_genid);
84000 }
84001
84002 static inline void rt_genid_bump_ipv4(struct net *net)
84003 {
84004- atomic_inc(&net->ipv4.rt_genid);
84005+ atomic_inc_unchecked(&net->ipv4.rt_genid);
84006 }
84007
84008 #if IS_ENABLED(CONFIG_IPV6)
84009 static inline int rt_genid_ipv6(struct net *net)
84010 {
84011- return atomic_read(&net->ipv6.rt_genid);
84012+ return atomic_read_unchecked(&net->ipv6.rt_genid);
84013 }
84014
84015 static inline void rt_genid_bump_ipv6(struct net *net)
84016 {
84017- atomic_inc(&net->ipv6.rt_genid);
84018+ atomic_inc_unchecked(&net->ipv6.rt_genid);
84019 }
84020 #else
84021 static inline int rt_genid_ipv6(struct net *net)
84022@@ -385,12 +389,12 @@ static inline void rt_genid_bump_all(struct net *net)
84023
84024 static inline int fnhe_genid(struct net *net)
84025 {
84026- return atomic_read(&net->fnhe_genid);
84027+ return atomic_read_unchecked(&net->fnhe_genid);
84028 }
84029
84030 static inline void fnhe_genid_bump(struct net *net)
84031 {
84032- atomic_inc(&net->fnhe_genid);
84033+ atomic_inc_unchecked(&net->fnhe_genid);
84034 }
84035
84036 #endif /* __NET_NET_NAMESPACE_H */
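
atomic_unchecked_t, used above for fnhe_genid and the various rt_genid counters, is the opt-out type for the PaX reference-counter overflow protection: plain atomic_t operations gain an overflow trap, while the *_unchecked variants keep classic wrap-around semantics for counters where wrapping is harmless by design (a generation id only needs to change, not to count). A rough sketch of the shape of the type and its accessors; the authoritative definitions live in the per-architecture atomic headers elsewhere in this patch:

typedef struct {
        int counter;
} atomic_unchecked_t;

/* No overflow trap is ever inserted on the unchecked flavour. */
static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return ACCESS_ONCE(v->counter);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        asm volatile("lock; incl %0"        /* LOCK_PREFIX in the real x86 code */
                     : "+m" (v->counter));
}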
84037diff --git a/include/net/netdma.h b/include/net/netdma.h
84038index 8ba8ce2..99b7fff 100644
84039--- a/include/net/netdma.h
84040+++ b/include/net/netdma.h
84041@@ -24,7 +24,7 @@
84042 #include <linux/dmaengine.h>
84043 #include <linux/skbuff.h>
84044
84045-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
84046+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
84047 struct sk_buff *skb, int offset, struct iovec *to,
84048 size_t len, struct dma_pinned_list *pinned_list);
84049
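__intentional_overflow is an annotation for the size_overflow GCC plugin: positive arguments are 1-based parameter positions whose arithmetic is deliberately allowed to overflow (here parameters 3 and 5, the offset and the length), while the -1 form seen elsewhere in this patch exempts the function as a whole and the 0 form is applied to individual struct fields. A sketch of the two-sided macro definition; the guard macro name is an assumption from memory, and the real definition lives in the compiler headers touched by this patch:

/* With the plugin active the attribute reaches the instrumentation
 * pass; without it the annotation compiles away to nothing. */
#ifdef SIZE_OVERFLOW_PLUGIN
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif
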
84050diff --git a/include/net/netlink.h b/include/net/netlink.h
84051index 2b47eaa..6d5bcc2 100644
84052--- a/include/net/netlink.h
84053+++ b/include/net/netlink.h
84054@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
84055 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
84056 {
84057 if (mark)
84058- skb_trim(skb, (unsigned char *) mark - skb->data);
84059+ skb_trim(skb, (const unsigned char *) mark - skb->data);
84060 }
84061
84062 /**
84063diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
84064index fbcc7fa..03c7e51 100644
84065--- a/include/net/netns/conntrack.h
84066+++ b/include/net/netns/conntrack.h
84067@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
84068 struct nf_proto_net {
84069 #ifdef CONFIG_SYSCTL
84070 struct ctl_table_header *ctl_table_header;
84071- struct ctl_table *ctl_table;
84072+ ctl_table_no_const *ctl_table;
84073 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
84074 struct ctl_table_header *ctl_compat_header;
84075- struct ctl_table *ctl_compat_table;
84076+ ctl_table_no_const *ctl_compat_table;
84077 #endif
84078 #endif
84079 unsigned int users;
84080@@ -58,7 +58,7 @@ struct nf_ip_net {
84081 struct nf_icmp_net icmpv6;
84082 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
84083 struct ctl_table_header *ctl_table_header;
84084- struct ctl_table *ctl_table;
84085+ ctl_table_no_const *ctl_table;
84086 #endif
84087 };
84088
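ctl_table_no_const is the writable twin of struct ctl_table. The constify plugin would otherwise force every ctl_table into read-only memory, but per-netns sysctl tables like these are built at runtime by copying a template and patching the data pointers, so they need a type the plugin leaves alone. Roughly, as this patch defines it in sysctl.h, together with the copy-and-patch pattern it enables (example_get_data is a hypothetical helper):

/* __no_const: tell the constify plugin to keep this type writable. */
typedef struct ctl_table __no_const ctl_table_no_const;

static int example_dointvec(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        ctl_table_no_const tmp = *table;        /* writable stack copy */

        tmp.data = example_get_data(table);     /* hypothetical helper */
        return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
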
84089diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
84090index 80f500a..f0c23c2 100644
84091--- a/include/net/netns/ipv4.h
84092+++ b/include/net/netns/ipv4.h
84093@@ -74,7 +74,7 @@ struct netns_ipv4 {
84094
84095 kgid_t sysctl_ping_group_range[2];
84096
84097- atomic_t dev_addr_genid;
84098+ atomic_unchecked_t dev_addr_genid;
84099
84100 #ifdef CONFIG_IP_MROUTE
84101 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
84102@@ -84,6 +84,6 @@ struct netns_ipv4 {
84103 struct fib_rules_ops *mr_rules_ops;
84104 #endif
84105 #endif
84106- atomic_t rt_genid;
84107+ atomic_unchecked_t rt_genid;
84108 };
84109 #endif
84110diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
84111index 21edaf1..4c5faae 100644
84112--- a/include/net/netns/ipv6.h
84113+++ b/include/net/netns/ipv6.h
84114@@ -73,8 +73,8 @@ struct netns_ipv6 {
84115 struct fib_rules_ops *mr6_rules_ops;
84116 #endif
84117 #endif
84118- atomic_t dev_addr_genid;
84119- atomic_t rt_genid;
84120+ atomic_unchecked_t dev_addr_genid;
84121+ atomic_unchecked_t rt_genid;
84122 };
84123
84124 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
84125diff --git a/include/net/ping.h b/include/net/ping.h
84126index 026479b..d9b2829 100644
84127--- a/include/net/ping.h
84128+++ b/include/net/ping.h
84129@@ -54,7 +54,7 @@ struct ping_iter_state {
84130
84131 extern struct proto ping_prot;
84132 #if IS_ENABLED(CONFIG_IPV6)
84133-extern struct pingv6_ops pingv6_ops;
84134+extern struct pingv6_ops *pingv6_ops;
84135 #endif
84136
84137 struct pingfakehdr {
84138diff --git a/include/net/protocol.h b/include/net/protocol.h
84139index a7e986b..dc67bce 100644
84140--- a/include/net/protocol.h
84141+++ b/include/net/protocol.h
84142@@ -49,7 +49,7 @@ struct net_protocol {
84143 * socket lookup?
84144 */
84145 icmp_strict_tag_validation:1;
84146-};
84147+} __do_const;
84148
84149 #if IS_ENABLED(CONFIG_IPV6)
84150 struct inet6_protocol {
84151@@ -62,7 +62,7 @@ struct inet6_protocol {
84152 u8 type, u8 code, int offset,
84153 __be32 info);
84154 unsigned int flags; /* INET6_PROTO_xxx */
84155-};
84156+} __do_const;
84157
84158 #define INET6_PROTO_NOPOLICY 0x1
84159 #define INET6_PROTO_FINAL 0x2
84160diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
84161index 661e45d..54c39df 100644
84162--- a/include/net/rtnetlink.h
84163+++ b/include/net/rtnetlink.h
84164@@ -93,7 +93,7 @@ struct rtnl_link_ops {
84165 int (*fill_slave_info)(struct sk_buff *skb,
84166 const struct net_device *dev,
84167 const struct net_device *slave_dev);
84168-};
84169+} __do_const;
84170
84171 int __rtnl_link_register(struct rtnl_link_ops *ops);
84172 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
84173diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
84174index 4a5b9a3..ca27d73 100644
84175--- a/include/net/sctp/checksum.h
84176+++ b/include/net/sctp/checksum.h
84177@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
84178 unsigned int offset)
84179 {
84180 struct sctphdr *sh = sctp_hdr(skb);
84181- __le32 ret, old = sh->checksum;
84182- const struct skb_checksum_ops ops = {
84183+ __le32 ret, old = sh->checksum;
84184+ static const struct skb_checksum_ops ops = {
84185 .update = sctp_csum_update,
84186 .combine = sctp_csum_combine,
84187 };
84188diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
84189index 7f4eeb3..37e8fe1 100644
84190--- a/include/net/sctp/sm.h
84191+++ b/include/net/sctp/sm.h
84192@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
84193 typedef struct {
84194 sctp_state_fn_t *fn;
84195 const char *name;
84196-} sctp_sm_table_entry_t;
84197+} __do_const sctp_sm_table_entry_t;
84198
84199 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
84200 * currently in use.
84201@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
84202 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
84203
84204 /* Extern declarations for major data structures. */
84205-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
84206+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
84207
84208
84209 /* Get the size of a DATA chunk payload. */
84210diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
84211index 0dfcc92..7967849 100644
84212--- a/include/net/sctp/structs.h
84213+++ b/include/net/sctp/structs.h
84214@@ -507,7 +507,7 @@ struct sctp_pf {
84215 struct sctp_association *asoc);
84216 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
84217 struct sctp_af *af;
84218-};
84219+} __do_const;
84220
84221
84222 /* Structure to track chunk fragments that have been acked, but peer
84223diff --git a/include/net/sock.h b/include/net/sock.h
84224index b9586a1..b2948c0 100644
84225--- a/include/net/sock.h
84226+++ b/include/net/sock.h
84227@@ -348,7 +348,7 @@ struct sock {
84228 unsigned int sk_napi_id;
84229 unsigned int sk_ll_usec;
84230 #endif
84231- atomic_t sk_drops;
84232+ atomic_unchecked_t sk_drops;
84233 int sk_rcvbuf;
84234
84235 struct sk_filter __rcu *sk_filter;
84236@@ -1036,7 +1036,7 @@ struct proto {
84237 void (*destroy_cgroup)(struct mem_cgroup *memcg);
84238 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
84239 #endif
84240-};
84241+} __randomize_layout;
84242
84243 /*
84244 * Bits in struct cg_proto.flags
84245@@ -1223,7 +1223,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
84246 return ret >> PAGE_SHIFT;
84247 }
84248
84249-static inline long
84250+static inline long __intentional_overflow(-1)
84251 sk_memory_allocated(const struct sock *sk)
84252 {
84253 struct proto *prot = sk->sk_prot;
84254@@ -1368,7 +1368,7 @@ struct sock_iocb {
84255 struct scm_cookie *scm;
84256 struct msghdr *msg, async_msg;
84257 struct kiocb *kiocb;
84258-};
84259+} __randomize_layout;
84260
84261 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
84262 {
84263@@ -1830,7 +1830,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
84264 }
84265
84266 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
84267- char __user *from, char *to,
84268+ char __user *from, unsigned char *to,
84269 int copy, int offset)
84270 {
84271 if (skb->ip_summed == CHECKSUM_NONE) {
84272@@ -2092,7 +2092,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
84273 }
84274 }
84275
84276-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
84277+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
84278
84279 /**
84280 * sk_page_frag - return an appropriate page_frag
84281diff --git a/include/net/tcp.h b/include/net/tcp.h
84282index 743acce..44a58b0 100644
84283--- a/include/net/tcp.h
84284+++ b/include/net/tcp.h
84285@@ -541,7 +541,7 @@ void tcp_retransmit_timer(struct sock *sk);
84286 void tcp_xmit_retransmit_queue(struct sock *);
84287 void tcp_simple_retransmit(struct sock *);
84288 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
84289-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
84290+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
84291
84292 void tcp_send_probe0(struct sock *);
84293 void tcp_send_partial(struct sock *);
84294@@ -710,8 +710,8 @@ struct tcp_skb_cb {
84295 struct inet6_skb_parm h6;
84296 #endif
84297 } header; /* For incoming frames */
84298- __u32 seq; /* Starting sequence number */
84299- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
84300+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
84301+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
84302 __u32 when; /* used to compute rtt's */
84303 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
84304
84305@@ -725,7 +725,7 @@ struct tcp_skb_cb {
84306
84307 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
84308 /* 1 byte hole */
84309- __u32 ack_seq; /* Sequence number ACK'd */
84310+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
84311 };
84312
84313 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
84314diff --git a/include/net/xfrm.h b/include/net/xfrm.h
84315index fb5654a..4457522 100644
84316--- a/include/net/xfrm.h
84317+++ b/include/net/xfrm.h
84318@@ -286,7 +286,6 @@ struct xfrm_dst;
84319 struct xfrm_policy_afinfo {
84320 unsigned short family;
84321 struct dst_ops *dst_ops;
84322- void (*garbage_collect)(struct net *net);
84323 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
84324 const xfrm_address_t *saddr,
84325 const xfrm_address_t *daddr);
84326@@ -304,7 +303,7 @@ struct xfrm_policy_afinfo {
84327 struct net_device *dev,
84328 const struct flowi *fl);
84329 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
84330-};
84331+} __do_const;
84332
84333 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
84334 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
84335@@ -343,7 +342,7 @@ struct xfrm_state_afinfo {
84336 int (*transport_finish)(struct sk_buff *skb,
84337 int async);
84338 void (*local_error)(struct sk_buff *skb, u32 mtu);
84339-};
84340+} __do_const;
84341
84342 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
84343 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
84344@@ -428,7 +427,7 @@ struct xfrm_mode {
84345 struct module *owner;
84346 unsigned int encap;
84347 int flags;
84348-};
84349+} __do_const;
84350
84351 /* Flags for xfrm_mode. */
84352 enum {
84353@@ -525,7 +524,7 @@ struct xfrm_policy {
84354 struct timer_list timer;
84355
84356 struct flow_cache_object flo;
84357- atomic_t genid;
84358+ atomic_unchecked_t genid;
84359 u32 priority;
84360 u32 index;
84361 struct xfrm_mark mark;
84362@@ -1165,6 +1164,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
84363 }
84364
84365 void xfrm_garbage_collect(struct net *net);
84366+void xfrm_garbage_collect_deferred(struct net *net);
84367
84368 #else
84369
84370@@ -1203,6 +1203,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
84371 static inline void xfrm_garbage_collect(struct net *net)
84372 {
84373 }
84374+static inline void xfrm_garbage_collect_deferred(struct net *net)
84375+{
84376+}
84377 #endif
84378
84379 static __inline__
84380diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
84381index 1017e0b..227aa4d 100644
84382--- a/include/rdma/iw_cm.h
84383+++ b/include/rdma/iw_cm.h
84384@@ -122,7 +122,7 @@ struct iw_cm_verbs {
84385 int backlog);
84386
84387 int (*destroy_listen)(struct iw_cm_id *cm_id);
84388-};
84389+} __no_const;
84390
84391 /**
84392 * iw_create_cm_id - Create an IW CM identifier.
84393diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
84394index 52beadf..598734c 100644
84395--- a/include/scsi/libfc.h
84396+++ b/include/scsi/libfc.h
84397@@ -771,6 +771,7 @@ struct libfc_function_template {
84398 */
84399 void (*disc_stop_final) (struct fc_lport *);
84400 };
84401+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
84402
84403 /**
84404 * struct fc_disc - Discovery context
84405@@ -875,7 +876,7 @@ struct fc_lport {
84406 struct fc_vport *vport;
84407
84408 /* Operational Information */
84409- struct libfc_function_template tt;
84410+ libfc_function_template_no_const tt;
84411 u8 link_up;
84412 u8 qfull;
84413 enum fc_lport_state state;
84414diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
84415index b4f1eff..7fdbd46 100644
84416--- a/include/scsi/scsi_device.h
84417+++ b/include/scsi/scsi_device.h
84418@@ -180,9 +180,9 @@ struct scsi_device {
84419 unsigned int max_device_blocked; /* what device_blocked counts down from */
84420 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
84421
84422- atomic_t iorequest_cnt;
84423- atomic_t iodone_cnt;
84424- atomic_t ioerr_cnt;
84425+ atomic_unchecked_t iorequest_cnt;
84426+ atomic_unchecked_t iodone_cnt;
84427+ atomic_unchecked_t ioerr_cnt;
84428
84429 struct device sdev_gendev,
84430 sdev_dev;
84431diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
84432index b797e8f..8e2c3aa 100644
84433--- a/include/scsi/scsi_transport_fc.h
84434+++ b/include/scsi/scsi_transport_fc.h
84435@@ -751,7 +751,8 @@ struct fc_function_template {
84436 unsigned long show_host_system_hostname:1;
84437
84438 unsigned long disable_target_scan:1;
84439-};
84440+} __do_const;
84441+typedef struct fc_function_template __no_const fc_function_template_no_const;
84442
84443
84444 /**
84445diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
84446index ae6c3b8..fd748ac 100644
84447--- a/include/sound/compress_driver.h
84448+++ b/include/sound/compress_driver.h
84449@@ -128,7 +128,7 @@ struct snd_compr_ops {
84450 struct snd_compr_caps *caps);
84451 int (*get_codec_caps) (struct snd_compr_stream *stream,
84452 struct snd_compr_codec_caps *codec);
84453-};
84454+} __no_const;
84455
84456 /**
84457 * struct snd_compr: Compressed device
84458diff --git a/include/sound/soc.h b/include/sound/soc.h
84459index 9a00147..d814573 100644
84460--- a/include/sound/soc.h
84461+++ b/include/sound/soc.h
84462@@ -770,7 +770,7 @@ struct snd_soc_codec_driver {
84463 /* probe ordering - for components with runtime dependencies */
84464 int probe_order;
84465 int remove_order;
84466-};
84467+} __do_const;
84468
84469 /* SoC platform interface */
84470 struct snd_soc_platform_driver {
84471@@ -816,7 +816,7 @@ struct snd_soc_platform_driver {
84472 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
84473 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
84474 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
84475-};
84476+} __do_const;
84477
84478 struct snd_soc_platform {
84479 const char *name;
84480diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
84481index 1772fad..282e3e2 100644
84482--- a/include/target/target_core_base.h
84483+++ b/include/target/target_core_base.h
84484@@ -754,7 +754,7 @@ struct se_device {
84485 atomic_long_t write_bytes;
84486 /* Active commands on this virtual SE device */
84487 atomic_t simple_cmds;
84488- atomic_t dev_ordered_id;
84489+ atomic_unchecked_t dev_ordered_id;
84490 atomic_t dev_ordered_sync;
84491 atomic_t dev_qf_count;
84492 int export_count;
84493diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
84494new file mode 100644
84495index 0000000..fb634b7
84496--- /dev/null
84497+++ b/include/trace/events/fs.h
84498@@ -0,0 +1,53 @@
84499+#undef TRACE_SYSTEM
84500+#define TRACE_SYSTEM fs
84501+
84502+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
84503+#define _TRACE_FS_H
84504+
84505+#include <linux/fs.h>
84506+#include <linux/tracepoint.h>
84507+
84508+TRACE_EVENT(do_sys_open,
84509+
84510+ TP_PROTO(const char *filename, int flags, int mode),
84511+
84512+ TP_ARGS(filename, flags, mode),
84513+
84514+ TP_STRUCT__entry(
84515+ __string( filename, filename )
84516+ __field( int, flags )
84517+ __field( int, mode )
84518+ ),
84519+
84520+ TP_fast_assign(
84521+ __assign_str(filename, filename);
84522+ __entry->flags = flags;
84523+ __entry->mode = mode;
84524+ ),
84525+
84526+ TP_printk("\"%s\" %x %o",
84527+ __get_str(filename), __entry->flags, __entry->mode)
84528+);
84529+
84530+TRACE_EVENT(open_exec,
84531+
84532+ TP_PROTO(const char *filename),
84533+
84534+ TP_ARGS(filename),
84535+
84536+ TP_STRUCT__entry(
84537+ __string( filename, filename )
84538+ ),
84539+
84540+ TP_fast_assign(
84541+ __assign_str(filename, filename);
84542+ ),
84543+
84544+ TP_printk("\"%s\"",
84545+ __get_str(filename))
84546+);
84547+
84548+#endif /* _TRACE_FS_H */
84549+
84550+/* This part must be outside protection */
84551+#include <trace/define_trace.h>
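The two TRACE_EVENT definitions above only declare the tracepoints; the matching call sites are inserted into fs/open.c and fs/exec.c elsewhere in this patch. For context, a sketch of how such a tracepoint is materialised and fired (the hook function below is hypothetical):

/* In exactly one .c file, CREATE_TRACE_POINTS expands the event
 * definitions into real code... */
#define CREATE_TRACE_POINTS
#include <trace/events/fs.h>

/* ...after which any caller just invokes the generated stub: */
static void example_open_hook(const char *name, int flags, int mode)
{
        trace_do_sys_open(name, flags, mode);
}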
84552diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
84553index 1c09820..7f5ec79 100644
84554--- a/include/trace/events/irq.h
84555+++ b/include/trace/events/irq.h
84556@@ -36,7 +36,7 @@ struct softirq_action;
84557 */
84558 TRACE_EVENT(irq_handler_entry,
84559
84560- TP_PROTO(int irq, struct irqaction *action),
84561+ TP_PROTO(int irq, const struct irqaction *action),
84562
84563 TP_ARGS(irq, action),
84564
84565@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
84566 */
84567 TRACE_EVENT(irq_handler_exit,
84568
84569- TP_PROTO(int irq, struct irqaction *action, int ret),
84570+ TP_PROTO(int irq, const struct irqaction *action, int ret),
84571
84572 TP_ARGS(irq, action, ret),
84573
84574diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
84575index 7caf44c..23c6f27 100644
84576--- a/include/uapi/linux/a.out.h
84577+++ b/include/uapi/linux/a.out.h
84578@@ -39,6 +39,14 @@ enum machine_type {
84579 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84580 };
84581
84582+/* Constants for the N_FLAGS field */
84583+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84584+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84585+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84586+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84587+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84588+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84589+
84590 #if !defined (N_MAGIC)
84591 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84592 #endif
84593diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
84594index 22b6ad3..aeba37e 100644
84595--- a/include/uapi/linux/bcache.h
84596+++ b/include/uapi/linux/bcache.h
84597@@ -5,6 +5,7 @@
84598 * Bcache on disk data structures
84599 */
84600
84601+#include <linux/compiler.h>
84602 #include <asm/types.h>
84603
84604 #define BITMASK(name, type, field, offset, size) \
84605@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
84606 /* Btree keys - all units are in sectors */
84607
84608 struct bkey {
84609- __u64 high;
84610- __u64 low;
84611+ __u64 high __intentional_overflow(-1);
84612+ __u64 low __intentional_overflow(-1);
84613 __u64 ptr[];
84614 };
84615
84616diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
84617index d876736..ccce5c0 100644
84618--- a/include/uapi/linux/byteorder/little_endian.h
84619+++ b/include/uapi/linux/byteorder/little_endian.h
84620@@ -42,51 +42,51 @@
84621
84622 static inline __le64 __cpu_to_le64p(const __u64 *p)
84623 {
84624- return (__force __le64)*p;
84625+ return (__force const __le64)*p;
84626 }
84627-static inline __u64 __le64_to_cpup(const __le64 *p)
84628+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
84629 {
84630- return (__force __u64)*p;
84631+ return (__force const __u64)*p;
84632 }
84633 static inline __le32 __cpu_to_le32p(const __u32 *p)
84634 {
84635- return (__force __le32)*p;
84636+ return (__force const __le32)*p;
84637 }
84638 static inline __u32 __le32_to_cpup(const __le32 *p)
84639 {
84640- return (__force __u32)*p;
84641+ return (__force const __u32)*p;
84642 }
84643 static inline __le16 __cpu_to_le16p(const __u16 *p)
84644 {
84645- return (__force __le16)*p;
84646+ return (__force const __le16)*p;
84647 }
84648 static inline __u16 __le16_to_cpup(const __le16 *p)
84649 {
84650- return (__force __u16)*p;
84651+ return (__force const __u16)*p;
84652 }
84653 static inline __be64 __cpu_to_be64p(const __u64 *p)
84654 {
84655- return (__force __be64)__swab64p(p);
84656+ return (__force const __be64)__swab64p(p);
84657 }
84658 static inline __u64 __be64_to_cpup(const __be64 *p)
84659 {
84660- return __swab64p((__u64 *)p);
84661+ return __swab64p((const __u64 *)p);
84662 }
84663 static inline __be32 __cpu_to_be32p(const __u32 *p)
84664 {
84665- return (__force __be32)__swab32p(p);
84666+ return (__force const __be32)__swab32p(p);
84667 }
84668-static inline __u32 __be32_to_cpup(const __be32 *p)
84669+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
84670 {
84671- return __swab32p((__u32 *)p);
84672+ return __swab32p((const __u32 *)p);
84673 }
84674 static inline __be16 __cpu_to_be16p(const __u16 *p)
84675 {
84676- return (__force __be16)__swab16p(p);
84677+ return (__force const __be16)__swab16p(p);
84678 }
84679 static inline __u16 __be16_to_cpup(const __be16 *p)
84680 {
84681- return __swab16p((__u16 *)p);
84682+ return __swab16p((const __u16 *)p);
84683 }
84684 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
84685 #define __le64_to_cpus(x) do { (void)(x); } while (0)
84686diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
84687index ef6103b..d4e65dd 100644
84688--- a/include/uapi/linux/elf.h
84689+++ b/include/uapi/linux/elf.h
84690@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
84691 #define PT_GNU_EH_FRAME 0x6474e550
84692
84693 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
84694+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
84695+
84696+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
84697+
84698+/* Constants for the e_flags field */
84699+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84700+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
84701+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
84702+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
84703+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84704+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84705
84706 /*
84707 * Extended Numbering
84708@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
84709 #define DT_DEBUG 21
84710 #define DT_TEXTREL 22
84711 #define DT_JMPREL 23
84712+#define DT_FLAGS 30
84713+ #define DF_TEXTREL 0x00000004
84714 #define DT_ENCODING 32
84715 #define OLD_DT_LOOS 0x60000000
84716 #define DT_LOOS 0x6000000d
84717@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
84718 #define PF_W 0x2
84719 #define PF_X 0x1
84720
84721+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
84722+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
84723+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
84724+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
84725+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
84726+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
84727+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
84728+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
84729+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
84730+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
84731+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
84732+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
84733+
84734 typedef struct elf32_phdr{
84735 Elf32_Word p_type;
84736 Elf32_Off p_offset;
84737@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
84738 #define EI_OSABI 7
84739 #define EI_PAD 8
84740
84741+#define EI_PAX 14
84742+
84743 #define ELFMAG0 0x7f /* EI_MAG */
84744 #define ELFMAG1 'E'
84745 #define ELFMAG2 'L'
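
The PT_PAX_FLAGS program header lets a binary opt individual PaX features on or off, and userspace tools such as paxctl flip the PF_* bits above. A hedged userspace sketch of reading the marking from a 64-bit ELF whose program headers have already been mapped (error handling trimmed; the two macros are repeated because a stock <elf.h> may not define them):

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS    (0x60000000 + 0x5041580)
#define PF_MPROTECT     (1U << 8)

static void show_pax(const Elf64_Phdr *phdr, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (phdr[i].p_type == PT_PAX_FLAGS)
                        printf("MPROTECT %s\n",
                               (phdr[i].p_flags & PF_MPROTECT) ?
                               "explicitly enabled" : "not marked");
}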
84746diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
84747index aa169c4..6a2771d 100644
84748--- a/include/uapi/linux/personality.h
84749+++ b/include/uapi/linux/personality.h
84750@@ -30,6 +30,7 @@ enum {
84751 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
84752 ADDR_NO_RANDOMIZE | \
84753 ADDR_COMPAT_LAYOUT | \
84754+ ADDR_LIMIT_3GB | \
84755 MMAP_PAGE_ZERO)
84756
84757 /*
84758diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
84759index 7530e74..e714828 100644
84760--- a/include/uapi/linux/screen_info.h
84761+++ b/include/uapi/linux/screen_info.h
84762@@ -43,7 +43,8 @@ struct screen_info {
84763 __u16 pages; /* 0x32 */
84764 __u16 vesa_attributes; /* 0x34 */
84765 __u32 capabilities; /* 0x36 */
84766- __u8 _reserved[6]; /* 0x3a */
84767+ __u16 vesapm_size; /* 0x3a */
84768+ __u8 _reserved[4]; /* 0x3c */
84769 } __attribute__((packed));
84770
84771 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
84772diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
84773index 0e011eb..82681b1 100644
84774--- a/include/uapi/linux/swab.h
84775+++ b/include/uapi/linux/swab.h
84776@@ -43,7 +43,7 @@
84777 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
84778 */
84779
84780-static inline __attribute_const__ __u16 __fswab16(__u16 val)
84781+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
84782 {
84783 #ifdef __HAVE_BUILTIN_BSWAP16__
84784 return __builtin_bswap16(val);
84785@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
84786 #endif
84787 }
84788
84789-static inline __attribute_const__ __u32 __fswab32(__u32 val)
84790+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
84791 {
84792 #ifdef __HAVE_BUILTIN_BSWAP32__
84793 return __builtin_bswap32(val);
84794@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
84795 #endif
84796 }
84797
84798-static inline __attribute_const__ __u64 __fswab64(__u64 val)
84799+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
84800 {
84801 #ifdef __HAVE_BUILTIN_BSWAP64__
84802 return __builtin_bswap64(val);
84803diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
84804index 6d67213..552fdd9 100644
84805--- a/include/uapi/linux/sysctl.h
84806+++ b/include/uapi/linux/sysctl.h
84807@@ -155,8 +155,6 @@ enum
84808 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
84809 };
84810
84811-
84812-
84813 /* CTL_VM names: */
84814 enum
84815 {
84816diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
84817index fe94bb9..c9e51c2 100644
84818--- a/include/uapi/linux/videodev2.h
84819+++ b/include/uapi/linux/videodev2.h
84820@@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
84821 union {
84822 __s32 value;
84823 __s64 value64;
84824- char *string;
84825+ char __user *string;
84826 };
84827 } __attribute__ ((packed));
84828
84829diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
84830index c38355c..17a57bc 100644
84831--- a/include/uapi/linux/xattr.h
84832+++ b/include/uapi/linux/xattr.h
84833@@ -73,5 +73,9 @@
84834 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
84835 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
84836
84837+/* User namespace */
84838+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
84839+#define XATTR_PAX_FLAGS_SUFFIX "flags"
84840+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
84841
84842 #endif /* _UAPI_LINUX_XATTR_H */
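
The user.pax.flags extended attribute is the filesystem-side counterpart to the ELF program-header marking: when the corresponding PaX option is enabled, the flags are read at exec time and survive tools that rewrite or strip program headers. A small userspace sketch of inspecting the attribute (the single-letter flag alphabet it carries is parsed by the binfmt_elf changes elsewhere in this patch):

#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        char buf[32];
        ssize_t n;

        if (argc < 2)
                return 2;
        n = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
        if (n < 0) {
                perror("getxattr");
                return 1;
        }
        buf[n] = '\0';
        printf("%s: pax flags \"%s\"\n", argv[1], buf);
        return 0;
}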
84843diff --git a/include/video/udlfb.h b/include/video/udlfb.h
84844index f9466fa..f4e2b81 100644
84845--- a/include/video/udlfb.h
84846+++ b/include/video/udlfb.h
84847@@ -53,10 +53,10 @@ struct dlfb_data {
84848 u32 pseudo_palette[256];
84849 int blank_mode; /*one of FB_BLANK_ */
84850 /* blit-only rendering path metrics, exposed through sysfs */
84851- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
84852- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
84853- atomic_t bytes_sent; /* to usb, after compression including overhead */
84854- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
84855+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
84856+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
84857+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
84858+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
84859 };
84860
84861 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
84862diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
84863index 30f5362..8ed8ac9 100644
84864--- a/include/video/uvesafb.h
84865+++ b/include/video/uvesafb.h
84866@@ -122,6 +122,7 @@ struct uvesafb_par {
84867 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
84868 u8 pmi_setpal; /* PMI for palette changes */
84869 u16 *pmi_base; /* protected mode interface location */
84870+ u8 *pmi_code; /* protected mode code location */
84871 void *pmi_start;
84872 void *pmi_pal;
84873 u8 *vbe_state_orig; /*
84874diff --git a/init/Kconfig b/init/Kconfig
84875index 93c5ef0..ac92caa 100644
84876--- a/init/Kconfig
84877+++ b/init/Kconfig
84878@@ -1079,6 +1079,7 @@ endif # CGROUPS
84879
84880 config CHECKPOINT_RESTORE
84881 bool "Checkpoint/restore support" if EXPERT
84882+ depends on !GRKERNSEC
84883 default n
84884 help
84885 Enables additional kernel features for the sake of checkpoint/restore.
84886@@ -1545,7 +1546,7 @@ config SLUB_DEBUG
84887
84888 config COMPAT_BRK
84889 bool "Disable heap randomization"
84890- default y
84891+ default n
84892 help
84893 Randomizing heap placement makes heap exploits harder, but it
84894 also breaks ancient binaries (including anything libc5 based).
84895@@ -1833,7 +1834,7 @@ config INIT_ALL_POSSIBLE
84896 config STOP_MACHINE
84897 bool
84898 default y
84899- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
84900+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
84901 help
84902 Need stop_machine() primitive.
84903
84904diff --git a/init/Makefile b/init/Makefile
84905index 7bc47ee..6da2dc7 100644
84906--- a/init/Makefile
84907+++ b/init/Makefile
84908@@ -2,6 +2,9 @@
84909 # Makefile for the linux kernel.
84910 #
84911
84912+ccflags-y := $(GCC_PLUGINS_CFLAGS)
84913+asflags-y := $(GCC_PLUGINS_AFLAGS)
84914+
84915 obj-y := main.o version.o mounts.o
84916 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
84917 obj-y += noinitramfs.o
84918diff --git a/init/do_mounts.c b/init/do_mounts.c
84919index 8e5addc..c96ea61 100644
84920--- a/init/do_mounts.c
84921+++ b/init/do_mounts.c
84922@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
84923 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
84924 {
84925 struct super_block *s;
84926- int err = sys_mount(name, "/root", fs, flags, data);
84927+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
84928 if (err)
84929 return err;
84930
84931- sys_chdir("/root");
84932+ sys_chdir((const char __force_user *)"/root");
84933 s = current->fs->pwd.dentry->d_sb;
84934 ROOT_DEV = s->s_dev;
84935 printk(KERN_INFO
84936@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
84937 va_start(args, fmt);
84938 vsprintf(buf, fmt, args);
84939 va_end(args);
84940- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
84941+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
84942 if (fd >= 0) {
84943 sys_ioctl(fd, FDEJECT, 0);
84944 sys_close(fd);
84945 }
84946 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
84947- fd = sys_open("/dev/console", O_RDWR, 0);
84948+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
84949 if (fd >= 0) {
84950 sys_ioctl(fd, TCGETS, (long)&termios);
84951 termios.c_lflag &= ~ICANON;
84952 sys_ioctl(fd, TCSETSF, (long)&termios);
84953- sys_read(fd, &c, 1);
84954+ sys_read(fd, (char __user *)&c, 1);
84955 termios.c_lflag |= ICANON;
84956 sys_ioctl(fd, TCSETSF, (long)&termios);
84957 sys_close(fd);
84958@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
84959 mount_root();
84960 out:
84961 devtmpfs_mount("dev");
84962- sys_mount(".", "/", NULL, MS_MOVE, NULL);
84963- sys_chroot(".");
84964+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
84965+ sys_chroot((const char __force_user *)".");
84966 }
84967
84968 static bool is_tmpfs;
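
The __force_user casts in do_mount_root and the surrounding boot code exist because this patch makes the kernel/user address-space split visible to the checker even here: the sys_* entry points take __user pointers, and kernel strings passed to them must be cast explicitly rather than relying on the implicit set_fs(KERNEL_DS) environment. The annotation itself is pure sparse metadata, roughly:

/* Sketch of the annotations (real definitions: the compiler headers
 * touched by this patch). Under sparse the address-space attribute is
 * checked; for a normal gcc build it vanishes. */
#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force_user   __force __user
#else
# define __user
# define __force_user
#endif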
84969diff --git a/init/do_mounts.h b/init/do_mounts.h
84970index f5b978a..69dbfe8 100644
84971--- a/init/do_mounts.h
84972+++ b/init/do_mounts.h
84973@@ -15,15 +15,15 @@ extern int root_mountflags;
84974
84975 static inline int create_dev(char *name, dev_t dev)
84976 {
84977- sys_unlink(name);
84978- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
84979+ sys_unlink((char __force_user *)name);
84980+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
84981 }
84982
84983 #if BITS_PER_LONG == 32
84984 static inline u32 bstat(char *name)
84985 {
84986 struct stat64 stat;
84987- if (sys_stat64(name, &stat) != 0)
84988+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
84989 return 0;
84990 if (!S_ISBLK(stat.st_mode))
84991 return 0;
84992@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
84993 static inline u32 bstat(char *name)
84994 {
84995 struct stat stat;
84996- if (sys_newstat(name, &stat) != 0)
84997+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
84998 return 0;
84999 if (!S_ISBLK(stat.st_mode))
85000 return 0;
85001diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
85002index 3e0878e..8a9d7a0 100644
85003--- a/init/do_mounts_initrd.c
85004+++ b/init/do_mounts_initrd.c
85005@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
85006 {
85007 sys_unshare(CLONE_FS | CLONE_FILES);
85008 /* stdin/stdout/stderr for /linuxrc */
85009- sys_open("/dev/console", O_RDWR, 0);
85010+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
85011 sys_dup(0);
85012 sys_dup(0);
85013 /* move initrd over / and chdir/chroot in initrd root */
85014- sys_chdir("/root");
85015- sys_mount(".", "/", NULL, MS_MOVE, NULL);
85016- sys_chroot(".");
85017+ sys_chdir((const char __force_user *)"/root");
85018+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
85019+ sys_chroot((const char __force_user *)".");
85020 sys_setsid();
85021 return 0;
85022 }
85023@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
85024 create_dev("/dev/root.old", Root_RAM0);
85025 /* mount initrd on rootfs' /root */
85026 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
85027- sys_mkdir("/old", 0700);
85028- sys_chdir("/old");
85029+ sys_mkdir((const char __force_user *)"/old", 0700);
85030+ sys_chdir((const char __force_user *)"/old");
85031
85032 /* try loading default modules from initrd */
85033 load_default_modules();
85034@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
85035 current->flags &= ~PF_FREEZER_SKIP;
85036
85037 /* move initrd to rootfs' /old */
85038- sys_mount("..", ".", NULL, MS_MOVE, NULL);
85039+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
85040 /* switch root and cwd back to / of rootfs */
85041- sys_chroot("..");
85042+ sys_chroot((const char __force_user *)"..");
85043
85044 if (new_decode_dev(real_root_dev) == Root_RAM0) {
85045- sys_chdir("/old");
85046+ sys_chdir((const char __force_user *)"/old");
85047 return;
85048 }
85049
85050- sys_chdir("/");
85051+ sys_chdir((const char __force_user *)"/");
85052 ROOT_DEV = new_decode_dev(real_root_dev);
85053 mount_root();
85054
85055 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
85056- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
85057+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
85058 if (!error)
85059 printk("okay\n");
85060 else {
85061- int fd = sys_open("/dev/root.old", O_RDWR, 0);
85062+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
85063 if (error == -ENOENT)
85064 printk("/initrd does not exist. Ignored.\n");
85065 else
85066 printk("failed\n");
85067 printk(KERN_NOTICE "Unmounting old root\n");
85068- sys_umount("/old", MNT_DETACH);
85069+ sys_umount((char __force_user *)"/old", MNT_DETACH);
85070 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
85071 if (fd < 0) {
85072 error = fd;
85073@@ -127,11 +127,11 @@ int __init initrd_load(void)
85074 * mounted in the normal path.
85075 */
85076 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
85077- sys_unlink("/initrd.image");
85078+ sys_unlink((const char __force_user *)"/initrd.image");
85079 handle_initrd();
85080 return 1;
85081 }
85082 }
85083- sys_unlink("/initrd.image");
85084+ sys_unlink((const char __force_user *)"/initrd.image");
85085 return 0;
85086 }
85087diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
85088index 8cb6db5..d729f50 100644
85089--- a/init/do_mounts_md.c
85090+++ b/init/do_mounts_md.c
85091@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
85092 partitioned ? "_d" : "", minor,
85093 md_setup_args[ent].device_names);
85094
85095- fd = sys_open(name, 0, 0);
85096+ fd = sys_open((char __force_user *)name, 0, 0);
85097 if (fd < 0) {
85098 printk(KERN_ERR "md: open failed - cannot start "
85099 "array %s\n", name);
85100@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
85101 * array without it
85102 */
85103 sys_close(fd);
85104- fd = sys_open(name, 0, 0);
85105+ fd = sys_open((char __force_user *)name, 0, 0);
85106 sys_ioctl(fd, BLKRRPART, 0);
85107 }
85108 sys_close(fd);
85109@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
85110
85111 wait_for_device_probe();
85112
85113- fd = sys_open("/dev/md0", 0, 0);
85114+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
85115 if (fd >= 0) {
85116 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
85117 sys_close(fd);
85118diff --git a/init/init_task.c b/init/init_task.c
85119index ba0a7f36..2bcf1d5 100644
85120--- a/init/init_task.c
85121+++ b/init/init_task.c
85122@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
85123 * Initial thread structure. Alignment of this is handled by a special
85124 * linker map entry.
85125 */
85126+#ifdef CONFIG_X86
85127+union thread_union init_thread_union __init_task_data;
85128+#else
85129 union thread_union init_thread_union __init_task_data =
85130 { INIT_THREAD_INFO(init_task) };
85131+#endif
85132diff --git a/init/initramfs.c b/init/initramfs.c
85133index 93b6139..8d628b7 100644
85134--- a/init/initramfs.c
85135+++ b/init/initramfs.c
85136@@ -84,7 +84,7 @@ static void __init free_hash(void)
85137 }
85138 }
85139
85140-static long __init do_utime(char *filename, time_t mtime)
85141+static long __init do_utime(char __force_user *filename, time_t mtime)
85142 {
85143 struct timespec t[2];
85144
85145@@ -119,7 +119,7 @@ static void __init dir_utime(void)
85146 struct dir_entry *de, *tmp;
85147 list_for_each_entry_safe(de, tmp, &dir_list, list) {
85148 list_del(&de->list);
85149- do_utime(de->name, de->mtime);
85150+ do_utime((char __force_user *)de->name, de->mtime);
85151 kfree(de->name);
85152 kfree(de);
85153 }
85154@@ -281,7 +281,7 @@ static int __init maybe_link(void)
85155 if (nlink >= 2) {
85156 char *old = find_link(major, minor, ino, mode, collected);
85157 if (old)
85158- return (sys_link(old, collected) < 0) ? -1 : 1;
85159+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
85160 }
85161 return 0;
85162 }
85163@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
85164 {
85165 struct stat st;
85166
85167- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
85168+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
85169 if (S_ISDIR(st.st_mode))
85170- sys_rmdir(path);
85171+ sys_rmdir((char __force_user *)path);
85172 else
85173- sys_unlink(path);
85174+ sys_unlink((char __force_user *)path);
85175 }
85176 }
85177
85178@@ -315,7 +315,7 @@ static int __init do_name(void)
85179 int openflags = O_WRONLY|O_CREAT;
85180 if (ml != 1)
85181 openflags |= O_TRUNC;
85182- wfd = sys_open(collected, openflags, mode);
85183+ wfd = sys_open((char __force_user *)collected, openflags, mode);
85184
85185 if (wfd >= 0) {
85186 sys_fchown(wfd, uid, gid);
85187@@ -327,17 +327,17 @@ static int __init do_name(void)
85188 }
85189 }
85190 } else if (S_ISDIR(mode)) {
85191- sys_mkdir(collected, mode);
85192- sys_chown(collected, uid, gid);
85193- sys_chmod(collected, mode);
85194+ sys_mkdir((char __force_user *)collected, mode);
85195+ sys_chown((char __force_user *)collected, uid, gid);
85196+ sys_chmod((char __force_user *)collected, mode);
85197 dir_add(collected, mtime);
85198 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
85199 S_ISFIFO(mode) || S_ISSOCK(mode)) {
85200 if (maybe_link() == 0) {
85201- sys_mknod(collected, mode, rdev);
85202- sys_chown(collected, uid, gid);
85203- sys_chmod(collected, mode);
85204- do_utime(collected, mtime);
85205+ sys_mknod((char __force_user *)collected, mode, rdev);
85206+ sys_chown((char __force_user *)collected, uid, gid);
85207+ sys_chmod((char __force_user *)collected, mode);
85208+ do_utime((char __force_user *)collected, mtime);
85209 }
85210 }
85211 return 0;
85212@@ -346,15 +346,15 @@ static int __init do_name(void)
85213 static int __init do_copy(void)
85214 {
85215 if (count >= body_len) {
85216- sys_write(wfd, victim, body_len);
85217+ sys_write(wfd, (char __force_user *)victim, body_len);
85218 sys_close(wfd);
85219- do_utime(vcollected, mtime);
85220+ do_utime((char __force_user *)vcollected, mtime);
85221 kfree(vcollected);
85222 eat(body_len);
85223 state = SkipIt;
85224 return 0;
85225 } else {
85226- sys_write(wfd, victim, count);
85227+ sys_write(wfd, (char __force_user *)victim, count);
85228 body_len -= count;
85229 eat(count);
85230 return 1;
85231@@ -365,9 +365,9 @@ static int __init do_symlink(void)
85232 {
85233 collected[N_ALIGN(name_len) + body_len] = '\0';
85234 clean_path(collected, 0);
85235- sys_symlink(collected + N_ALIGN(name_len), collected);
85236- sys_lchown(collected, uid, gid);
85237- do_utime(collected, mtime);
85238+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
85239+ sys_lchown((char __force_user *)collected, uid, gid);
85240+ do_utime((char __force_user *)collected, mtime);
85241 state = SkipIt;
85242 next_state = Reset;
85243 return 0;
85244diff --git a/init/main.c b/init/main.c
85245index 9c7fd4c..650b4f1 100644
85246--- a/init/main.c
85247+++ b/init/main.c
85248@@ -97,6 +97,8 @@ extern void radix_tree_init(void);
85249 static inline void mark_rodata_ro(void) { }
85250 #endif
85251
85252+extern void grsecurity_init(void);
85253+
85254 /*
85255 * Debug helper: via this flag we know that we are in 'early bootup code'
85256 * where only the boot processor is running with IRQ disabled. This means
85257@@ -158,6 +160,75 @@ static int __init set_reset_devices(char *str)
85258
85259 __setup("reset_devices", set_reset_devices);
85260
85261+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
85262+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
85263+static int __init setup_grsec_proc_gid(char *str)
85264+{
85265+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
85266+ return 1;
85267+}
85268+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
85269+#endif
85270+
85271+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
85272+unsigned long pax_user_shadow_base __read_only;
85273+EXPORT_SYMBOL(pax_user_shadow_base);
85274+extern char pax_enter_kernel_user[];
85275+extern char pax_exit_kernel_user[];
85276+#endif
85277+
85278+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
85279+static int __init setup_pax_nouderef(char *str)
85280+{
85281+#ifdef CONFIG_X86_32
85282+ unsigned int cpu;
85283+ struct desc_struct *gdt;
85284+
85285+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
85286+ gdt = get_cpu_gdt_table(cpu);
85287+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
85288+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
85289+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
85290+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
85291+ }
85292+ loadsegment(ds, __KERNEL_DS);
85293+ loadsegment(es, __KERNEL_DS);
85294+ loadsegment(ss, __KERNEL_DS);
85295+#else
85296+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
85297+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
85298+ clone_pgd_mask = ~(pgdval_t)0UL;
85299+ pax_user_shadow_base = 0UL;
85300+ setup_clear_cpu_cap(X86_FEATURE_PCID);
85301+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
85302+#endif
85303+
85304+ return 0;
85305+}
85306+early_param("pax_nouderef", setup_pax_nouderef);
85307+
85308+#ifdef CONFIG_X86_64
85309+static int __init setup_pax_weakuderef(char *str)
85310+{
85311+ if (clone_pgd_mask != ~(pgdval_t)0UL)
85312+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
85313+ return 1;
85314+}
85315+__setup("pax_weakuderef", setup_pax_weakuderef);
85316+#endif
85317+#endif
85318+
85319+#ifdef CONFIG_PAX_SOFTMODE
85320+int pax_softmode;
85321+
85322+static int __init setup_pax_softmode(char *str)
85323+{
85324+ get_option(&str, &pax_softmode);
85325+ return 1;
85326+}
85327+__setup("pax_softmode=", setup_pax_softmode);
85328+#endif
85329+
85330 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
85331 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
85332 static const char *panic_later, *panic_param;
85333@@ -688,25 +759,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
85334 {
85335 int count = preempt_count();
85336 int ret;
85337- char msgbuf[64];
85338+ const char *msg1 = "", *msg2 = "";
85339
85340 if (initcall_debug)
85341 ret = do_one_initcall_debug(fn);
85342 else
85343 ret = fn();
85344
85345- msgbuf[0] = 0;
85346-
85347 if (preempt_count() != count) {
85348- sprintf(msgbuf, "preemption imbalance ");
85349+ msg1 = " preemption imbalance";
85350 preempt_count_set(count);
85351 }
85352 if (irqs_disabled()) {
85353- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
85354+ msg2 = " disabled interrupts";
85355 local_irq_enable();
85356 }
85357- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
85358+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
85359
85360+ add_latent_entropy();
85361 return ret;
85362 }
85363
85364@@ -813,8 +883,8 @@ static int run_init_process(const char *init_filename)
85365 {
85366 argv_init[0] = init_filename;
85367 return do_execve(getname_kernel(init_filename),
85368- (const char __user *const __user *)argv_init,
85369- (const char __user *const __user *)envp_init);
85370+ (const char __user *const __force_user *)argv_init,
85371+ (const char __user *const __force_user *)envp_init);
85372 }
85373
85374 static int try_to_run_init_process(const char *init_filename)
85375@@ -831,6 +901,10 @@ static int try_to_run_init_process(const char *init_filename)
85376 return ret;
85377 }
85378
85379+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
85380+extern int gr_init_ran;
85381+#endif
85382+
85383 static noinline void __init kernel_init_freeable(void);
85384
85385 static int __ref kernel_init(void *unused)
85386@@ -855,6 +929,11 @@ static int __ref kernel_init(void *unused)
85387 ramdisk_execute_command, ret);
85388 }
85389
85390+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
85391+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
85392+ gr_init_ran = 1;
85393+#endif
85394+
85395 /*
85396 * We try each of these until one succeeds.
85397 *
85398@@ -910,7 +989,7 @@ static noinline void __init kernel_init_freeable(void)
85399 do_basic_setup();
85400
85401 /* Open the /dev/console on the rootfs, this should never fail */
85402- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
85403+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
85404 pr_err("Warning: unable to open an initial console.\n");
85405
85406 (void) sys_dup(0);
85407@@ -923,11 +1002,13 @@ static noinline void __init kernel_init_freeable(void)
85408 if (!ramdisk_execute_command)
85409 ramdisk_execute_command = "/init";
85410
85411- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
85412+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
85413 ramdisk_execute_command = NULL;
85414 prepare_namespace();
85415 }
85416
85417+ grsecurity_init();
85418+
85419 /*
85420 * Ok, we have completed the initial bootup, and
85421 * we're essentially up and running. Get rid of the
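
All of the setup_* handlers added to init/main.c above hang off the kernel command line: __setup() registers a prefix-matched handler that runs during early parameter parsing, so booting with, say, pax_softmode=1 or grsec_proc_gid=10 takes effect before any init code relies on the value. The general shape, with a hypothetical knob name:

#include <linux/init.h>
#include <linux/kernel.h>

static int example_knob __read_mostly;

/* Booting with example_knob=<n> on the command line sets the flag. */
static int __init setup_example_knob(char *str)
{
        get_option(&str, &example_knob);
        return 1;                       /* parameter consumed */
}
__setup("example_knob=", setup_example_knob);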
85422diff --git a/ipc/compat.c b/ipc/compat.c
85423index f486b00..442867f 100644
85424--- a/ipc/compat.c
85425+++ b/ipc/compat.c
85426@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
85427 COMPAT_SHMLBA);
85428 if (err < 0)
85429 return err;
85430- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
85431+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
85432 }
85433 case SHMDT:
85434 return sys_shmdt(compat_ptr(ptr));
85435diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
85436index 1702864..797fa84 100644
85437--- a/ipc/ipc_sysctl.c
85438+++ b/ipc/ipc_sysctl.c
85439@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
85440 static int proc_ipc_dointvec(ctl_table *table, int write,
85441 void __user *buffer, size_t *lenp, loff_t *ppos)
85442 {
85443- struct ctl_table ipc_table;
85444+ ctl_table_no_const ipc_table;
85445
85446 memcpy(&ipc_table, table, sizeof(ipc_table));
85447 ipc_table.data = get_ipc(table);
85448@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
85449 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
85450 void __user *buffer, size_t *lenp, loff_t *ppos)
85451 {
85452- struct ctl_table ipc_table;
85453+ ctl_table_no_const ipc_table;
85454
85455 memcpy(&ipc_table, table, sizeof(ipc_table));
85456 ipc_table.data = get_ipc(table);
85457@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
85458 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
85459 void __user *buffer, size_t *lenp, loff_t *ppos)
85460 {
85461- struct ctl_table ipc_table;
85462+ ctl_table_no_const ipc_table;
85463 size_t lenp_bef = *lenp;
85464 int rc;
85465
85466@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
85467 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
85468 void __user *buffer, size_t *lenp, loff_t *ppos)
85469 {
85470- struct ctl_table ipc_table;
85471+ ctl_table_no_const ipc_table;
85472 memcpy(&ipc_table, table, sizeof(ipc_table));
85473 ipc_table.data = get_ipc(table);
85474
85475@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
85476 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
85477 void __user *buffer, size_t *lenp, loff_t *ppos)
85478 {
85479- struct ctl_table ipc_table;
85480+ ctl_table_no_const ipc_table;
85481 size_t lenp_bef = *lenp;
85482 int oldval;
85483 int rc;
85484diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
85485index 5bb8bfe..a38ec05 100644
85486--- a/ipc/mq_sysctl.c
85487+++ b/ipc/mq_sysctl.c
85488@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
85489 static int proc_mq_dointvec(ctl_table *table, int write,
85490 void __user *buffer, size_t *lenp, loff_t *ppos)
85491 {
85492- struct ctl_table mq_table;
85493+ ctl_table_no_const mq_table;
85494 memcpy(&mq_table, table, sizeof(mq_table));
85495 mq_table.data = get_mq(table);
85496
85497@@ -35,7 +35,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
85498 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
85499 void __user *buffer, size_t *lenp, loff_t *ppos)
85500 {
85501- struct ctl_table mq_table;
85502+ ctl_table_no_const mq_table;
85503 memcpy(&mq_table, table, sizeof(mq_table));
85504 mq_table.data = get_mq(table);
85505
85506diff --git a/ipc/mqueue.c b/ipc/mqueue.c
85507index c3b3117..1efa933 100644
85508--- a/ipc/mqueue.c
85509+++ b/ipc/mqueue.c
85510@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
85511 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
85512 info->attr.mq_msgsize);
85513
85514+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
85515 spin_lock(&mq_lock);
85516 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
85517 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
85518diff --git a/ipc/msg.c b/ipc/msg.c
85519index 6498531..b0ff3c8 100644
85520--- a/ipc/msg.c
85521+++ b/ipc/msg.c
85522@@ -303,18 +303,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
85523 return security_msg_queue_associate(msq, msgflg);
85524 }
85525
85526+static struct ipc_ops msg_ops = {
85527+ .getnew = newque,
85528+ .associate = msg_security,
85529+ .more_checks = NULL
85530+};
85531+
85532 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
85533 {
85534 struct ipc_namespace *ns;
85535- struct ipc_ops msg_ops;
85536 struct ipc_params msg_params;
85537
85538 ns = current->nsproxy->ipc_ns;
85539
85540- msg_ops.getnew = newque;
85541- msg_ops.associate = msg_security;
85542- msg_ops.more_checks = NULL;
85543-
85544 msg_params.key = key;
85545 msg_params.flg = msgflg;
85546
85547diff --git a/ipc/sem.c b/ipc/sem.c
85548index bee5554..e9af81dd 100644
85549--- a/ipc/sem.c
85550+++ b/ipc/sem.c
85551@@ -561,10 +561,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
85552 return 0;
85553 }
85554
85555+static struct ipc_ops sem_ops = {
85556+ .getnew = newary,
85557+ .associate = sem_security,
85558+ .more_checks = sem_more_checks
85559+};
85560+
85561 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
85562 {
85563 struct ipc_namespace *ns;
85564- struct ipc_ops sem_ops;
85565 struct ipc_params sem_params;
85566
85567 ns = current->nsproxy->ipc_ns;
85568@@ -572,10 +577,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
85569 if (nsems < 0 || nsems > ns->sc_semmsl)
85570 return -EINVAL;
85571
85572- sem_ops.getnew = newary;
85573- sem_ops.associate = sem_security;
85574- sem_ops.more_checks = sem_more_checks;
85575-
85576 sem_params.key = key;
85577 sem_params.flg = semflg;
85578 sem_params.u.nsems = nsems;
85579diff --git a/ipc/shm.c b/ipc/shm.c
85580index 7645961..afc7f02 100644
85581--- a/ipc/shm.c
85582+++ b/ipc/shm.c
85583@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
85584 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
85585 #endif
85586
85587+#ifdef CONFIG_GRKERNSEC
85588+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
85589+ const time_t shm_createtime, const kuid_t cuid,
85590+ const int shmid);
85591+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
85592+ const time_t shm_createtime);
85593+#endif
85594+
85595 void shm_init_ns(struct ipc_namespace *ns)
85596 {
85597 ns->shm_ctlmax = SHMMAX;
85598@@ -553,6 +561,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
85599 shp->shm_lprid = 0;
85600 shp->shm_atim = shp->shm_dtim = 0;
85601 shp->shm_ctim = get_seconds();
85602+#ifdef CONFIG_GRKERNSEC
85603+ {
85604+ struct timespec timeval;
85605+ do_posix_clock_monotonic_gettime(&timeval);
85606+
85607+ shp->shm_createtime = timeval.tv_sec;
85608+ }
85609+#endif
85610 shp->shm_segsz = size;
85611 shp->shm_nattch = 0;
85612 shp->shm_file = file;
85613@@ -606,18 +622,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
85614 return 0;
85615 }
85616
85617+static struct ipc_ops shm_ops = {
85618+ .getnew = newseg,
85619+ .associate = shm_security,
85620+ .more_checks = shm_more_checks
85621+};
85622+
85623 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
85624 {
85625 struct ipc_namespace *ns;
85626- struct ipc_ops shm_ops;
85627 struct ipc_params shm_params;
85628
85629 ns = current->nsproxy->ipc_ns;
85630
85631- shm_ops.getnew = newseg;
85632- shm_ops.associate = shm_security;
85633- shm_ops.more_checks = shm_more_checks;
85634-
85635 shm_params.key = key;
85636 shm_params.flg = shmflg;
85637 shm_params.u.size = size;
85638@@ -1088,6 +1105,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
85639 f_mode = FMODE_READ | FMODE_WRITE;
85640 }
85641 if (shmflg & SHM_EXEC) {
85642+
85643+#ifdef CONFIG_PAX_MPROTECT
85644+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
85645+ goto out;
85646+#endif
85647+
85648 prot |= PROT_EXEC;
85649 acc_mode |= S_IXUGO;
85650 }
85651@@ -1112,6 +1135,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
85652 if (err)
85653 goto out_unlock;
85654
85655+#ifdef CONFIG_GRKERNSEC
85656+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
85657+ shp->shm_perm.cuid, shmid) ||
85658+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
85659+ err = -EACCES;
85660+ goto out_unlock;
85661+ }
85662+#endif
85663+
85664 ipc_lock_object(&shp->shm_perm);
85665
85666 /* check if shm_destroy() is tearing down shp */
85667@@ -1124,6 +1156,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
85668 path = shp->shm_file->f_path;
85669 path_get(&path);
85670 shp->shm_nattch++;
85671+#ifdef CONFIG_GRKERNSEC
85672+ shp->shm_lapid = current->pid;
85673+#endif
85674 size = i_size_read(path.dentry->d_inode);
85675 ipc_unlock_object(&shp->shm_perm);
85676 rcu_read_unlock();
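The ipc/shm.c hunks record a segment creation timestamp and last-attach PID, refuse PROT_EXEC attaches under PAX_MPROTECT, and have do_shmat() consult gr_handle_shmat()/gr_chroot_shmat() before granting the attach. The gr_ hooks are grsecurity-internal; for reference, the userspace path they mediate is the plain SysV shmget/shmat sequence, shown here as a runnable demo that assumes nothing beyond standard SysV IPC:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        /* create a private 4 KiB segment; newseg() runs on this path */
        int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (shmid < 0) { perror("shmget"); return 1; }

        /* attach it; this is the do_shmat() path the hooks gate */
        char *p = shmat(shmid, NULL, 0);
        if (p == (void *)-1) { perror("shmat"); return 1; }

        strcpy(p, "hello");
        printf("%s\n", p);

        shmdt(p);
        shmctl(shmid, IPC_RMID, NULL);   /* clean up the segment */
        return 0;
}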
85677diff --git a/ipc/util.c b/ipc/util.c
85678index e1b4c6d..8174204 100644
85679--- a/ipc/util.c
85680+++ b/ipc/util.c
85681@@ -71,6 +71,8 @@ struct ipc_proc_iface {
85682 int (*show)(struct seq_file *, void *);
85683 };
85684
85685+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
85686+
85687 static void ipc_memory_notifier(struct work_struct *work)
85688 {
85689 ipcns_notify(IPCNS_MEMCHANGED);
85690@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
85691 granted_mode >>= 6;
85692 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
85693 granted_mode >>= 3;
85694+
85695+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
85696+ return -1;
85697+
85698 /* is there some bit set in requested_mode but not in granted_mode? */
85699 if ((requested_mode & ~granted_mode & 0007) &&
85700 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
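ipcperms() picks the owner/group/other mode triplet by right-shifting granted_mode, then fails if any requested bit is missing; the gr_ipc_permitted() hook is consulted just before that test. The core bit logic, extracted into a runnable sketch (simplified to the low three mode bits; the CAP_IPC_OWNER fallback is only noted in a comment):

#include <stdio.h>

/* mode layout mirrors SysV IPC: rwxrwxrwx for owner/group/other */
static int demo_ipcperms(unsigned granted_mode, unsigned requested_mode,
                         int is_owner, int is_group)
{
        if (is_owner)
                granted_mode >>= 6;      /* select the owner triplet */
        else if (is_group)
                granted_mode >>= 3;      /* select the group triplet */

        /* some bit set in requested_mode but not in granted_mode? */
        if (requested_mode & ~granted_mode & 0007)
                return -1;               /* kernel would check CAP_IPC_OWNER here */
        return 0;
}

int main(void)
{
        /* 0640: owner rw-, group r--, other ---; ask for write as group */
        printf("%d\n", demo_ipcperms(0640, 0002, 0, 1));  /* -1: denied */
        printf("%d\n", demo_ipcperms(0640, 0004, 0, 1));  /*  0: allowed */
        return 0;
}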
85701diff --git a/kernel/acct.c b/kernel/acct.c
85702index 8d6e145..33e0b1e 100644
85703--- a/kernel/acct.c
85704+++ b/kernel/acct.c
85705@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
85706 */
85707 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
85708 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
85709- file->f_op->write(file, (char *)&ac,
85710+ file->f_op->write(file, (char __force_user *)&ac,
85711 sizeof(acct_t), &file->f_pos);
85712 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
85713 set_fs(fs);
85714diff --git a/kernel/audit.c b/kernel/audit.c
85715index d5f31c1..06646e1 100644
85716--- a/kernel/audit.c
85717+++ b/kernel/audit.c
85718@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
85719 3) suppressed due to audit_rate_limit
85720 4) suppressed due to audit_backlog_limit
85721 */
85722-static atomic_t audit_lost = ATOMIC_INIT(0);
85723+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
85724
85725 /* The netlink socket. */
85726 static struct sock *audit_sock;
85727@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
85728 unsigned long now;
85729 int print;
85730
85731- atomic_inc(&audit_lost);
85732+ atomic_inc_unchecked(&audit_lost);
85733
85734 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
85735
85736@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
85737 if (print) {
85738 if (printk_ratelimit())
85739 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
85740- atomic_read(&audit_lost),
85741+ atomic_read_unchecked(&audit_lost),
85742 audit_rate_limit,
85743 audit_backlog_limit);
85744 audit_panic(message);
85745@@ -803,7 +803,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
85746 s.pid = audit_pid;
85747 s.rate_limit = audit_rate_limit;
85748 s.backlog_limit = audit_backlog_limit;
85749- s.lost = atomic_read(&audit_lost);
85750+ s.lost = atomic_read_unchecked(&audit_lost);
85751 s.backlog = skb_queue_len(&audit_skb_queue);
85752 s.version = AUDIT_VERSION_LATEST;
85753 s.backlog_wait_time = audit_backlog_wait_time;
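The atomic_t to atomic_unchecked_t conversion here (and in several files further down) is the usual PaX REFCOUNT pattern: audit_lost is a pure statistics counter, nothing frees or dereferences based on its value, so overflow is harmless and it is moved to the "unchecked" type that the overflow instrumentation skips. A userspace analogue of such a wrap-tolerant counter, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* statistics only: nothing dereferences or frees based on this value,
 * so wraparound would be benign -- cf. atomic_unchecked_t in the hunk */
static atomic_uint lost;

static void log_lost(void)
{
        atomic_fetch_add(&lost, 1);
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                log_lost();
        printf("lost=%u\n", atomic_load(&lost));
        return 0;
}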
85754diff --git a/kernel/auditsc.c b/kernel/auditsc.c
85755index 3b29605..3604797 100644
85756--- a/kernel/auditsc.c
85757+++ b/kernel/auditsc.c
85758@@ -720,6 +720,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
85759 return AUDIT_BUILD_CONTEXT;
85760 }
85761
85762+static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
85763+{
85764+ int word, bit;
85765+
85766+ if (val > 0xffffffff)
85767+ return false;
85768+
85769+ word = AUDIT_WORD(val);
85770+ if (word >= AUDIT_BITMASK_SIZE)
85771+ return false;
85772+
85773+ bit = AUDIT_BIT(val);
85774+
85775+ return rule->mask[word] & bit;
85776+}
85777+
85778 /* At syscall entry and exit time, this filter is called if the
85779 * audit_state is not low enough that auditing cannot take place, but is
85780 * also not high enough that we already know we have to write an audit
85781@@ -737,11 +753,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
85782
85783 rcu_read_lock();
85784 if (!list_empty(list)) {
85785- int word = AUDIT_WORD(ctx->major);
85786- int bit = AUDIT_BIT(ctx->major);
85787-
85788 list_for_each_entry_rcu(e, list, list) {
85789- if ((e->rule.mask[word] & bit) == bit &&
85790+ if (audit_in_mask(&e->rule, ctx->major) &&
85791 audit_filter_rules(tsk, &e->rule, ctx, NULL,
85792 &state, false)) {
85793 rcu_read_unlock();
85794@@ -761,20 +774,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
85795 static int audit_filter_inode_name(struct task_struct *tsk,
85796 struct audit_names *n,
85797 struct audit_context *ctx) {
85798- int word, bit;
85799 int h = audit_hash_ino((u32)n->ino);
85800 struct list_head *list = &audit_inode_hash[h];
85801 struct audit_entry *e;
85802 enum audit_state state;
85803
85804- word = AUDIT_WORD(ctx->major);
85805- bit = AUDIT_BIT(ctx->major);
85806-
85807 if (list_empty(list))
85808 return 0;
85809
85810 list_for_each_entry_rcu(e, list, list) {
85811- if ((e->rule.mask[word] & bit) == bit &&
85812+ if (audit_in_mask(&e->rule, ctx->major) &&
85813 audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
85814 ctx->current_state = state;
85815 return 1;
85816@@ -1945,7 +1954,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
85817 }
85818
85819 /* global counter which is incremented every time something logs in */
85820-static atomic_t session_id = ATOMIC_INIT(0);
85821+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
85822
85823 static int audit_set_loginuid_perm(kuid_t loginuid)
85824 {
85825@@ -2014,7 +2023,7 @@ int audit_set_loginuid(kuid_t loginuid)
85826
85827 /* are we setting or clearing? */
85828 if (uid_valid(loginuid))
85829- sessionid = (unsigned int)atomic_inc_return(&session_id);
85830+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
85831
85832 task->sessionid = sessionid;
85833 task->loginuid = loginuid;
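In auditsc.c the two filter loops previously computed AUDIT_WORD/AUDIT_BIT inline and never bounds-checked the syscall number against the rule bitmask; the new audit_in_mask() centralizes the computation and checks ranges first. A standalone sketch of the same word/bit membership test — the /32 and %32 layout mirrors what the upstream AUDIT_WORD/AUDIT_BIT macros compute, but the constants here are this sketch's assumption, not quoted from the patch:

#include <stdio.h>
#include <stdint.h>

#define BITMASK_WORDS 64                 /* cf. AUDIT_BITMASK_SIZE upstream */

static int in_mask(const uint32_t *mask, unsigned long val)
{
        if (val > 0xffffffff)
                return 0;                /* can't be in a 32-bit-indexed mask */

        unsigned word = val / 32;        /* cf. AUDIT_WORD() */
        if (word >= BITMASK_WORDS)
                return 0;                /* out of range: bounds check first */

        uint32_t bit = 1u << (val % 32); /* cf. AUDIT_BIT() */
        return !!(mask[word] & bit);
}

int main(void)
{
        uint32_t mask[BITMASK_WORDS] = {0};
        mask[59 / 32] |= 1u << (59 % 32);   /* mark syscall 59 (execve on x86-64) */

        printf("%d %d %d\n", in_mask(mask, 59),   /* 1 */
               in_mask(mask, 60),                 /* 0 */
               in_mask(mask, 5000));              /* 0: word out of range */
        return 0;
}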
85834diff --git a/kernel/capability.c b/kernel/capability.c
85835index 34019c5..363f279 100644
85836--- a/kernel/capability.c
85837+++ b/kernel/capability.c
85838@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
85839 * before modification is attempted and the application
85840 * fails.
85841 */
85842+ if (tocopy > ARRAY_SIZE(kdata))
85843+ return -EFAULT;
85844+
85845 if (copy_to_user(dataptr, kdata, tocopy
85846 * sizeof(struct __user_cap_data_struct))) {
85847 return -EFAULT;
85848@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
85849 int ret;
85850
85851 rcu_read_lock();
85852- ret = security_capable(__task_cred(t), ns, cap);
85853+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
85854+ gr_task_is_capable(t, __task_cred(t), cap);
85855 rcu_read_unlock();
85856
85857- return (ret == 0);
85858+ return ret;
85859 }
85860
85861 /**
85862@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
85863 int ret;
85864
85865 rcu_read_lock();
85866- ret = security_capable_noaudit(__task_cred(t), ns, cap);
85867+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
85868 rcu_read_unlock();
85869
85870- return (ret == 0);
85871+ return ret;
85872 }
85873
85874 /**
85875@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
85876 BUG();
85877 }
85878
85879- if (security_capable(current_cred(), ns, cap) == 0) {
85880+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
85881 current->flags |= PF_SUPERPRIV;
85882 return true;
85883 }
85884@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
85885 }
85886 EXPORT_SYMBOL(ns_capable);
85887
85888+bool ns_capable_nolog(struct user_namespace *ns, int cap)
85889+{
85890+ if (unlikely(!cap_valid(cap))) {
85891+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
85892+ BUG();
85893+ }
85894+
85895+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
85896+ current->flags |= PF_SUPERPRIV;
85897+ return true;
85898+ }
85899+ return false;
85900+}
85901+EXPORT_SYMBOL(ns_capable_nolog);
85902+
85903 /**
85904 * file_ns_capable - Determine if the file's opener had a capability in effect
85905 * @file: The file we want to check
85906@@ -432,6 +451,12 @@ bool capable(int cap)
85907 }
85908 EXPORT_SYMBOL(capable);
85909
85910+bool capable_nolog(int cap)
85911+{
85912+ return ns_capable_nolog(&init_user_ns, cap);
85913+}
85914+EXPORT_SYMBOL(capable_nolog);
85915+
85916 /**
85917 * inode_capable - Check superior capability over inode
85918 * @inode: The inode in question
85919@@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
85920 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
85921 }
85922 EXPORT_SYMBOL(inode_capable);
85923+
85924+bool inode_capable_nolog(const struct inode *inode, int cap)
85925+{
85926+ struct user_namespace *ns = current_user_ns();
85927+
85928+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
85929+}
85930+EXPORT_SYMBOL(inode_capable_nolog);
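Two things happen in the capability.c diff: capget gains a bounds check so a hostile header version cannot make the kernel copy more than the on-stack kdata array out to userspace, and new *_nolog capability variants are added that also consult grsecurity's RBAC without generating audit noise. The bounds-check idiom, in a runnable form with hypothetical names:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned effective, permitted, inheritable; };

/* copy 'tocopy' elements out, refusing sizes beyond the source buffer */
static int copy_caps(struct cap_data *dst, size_t tocopy)
{
        static const struct cap_data kdata[2] = {
                { 0x1, 0x1, 0x0 }, { 0x2, 0x2, 0x0 },
        };

        if (tocopy > ARRAY_SIZE(kdata))   /* the added check */
                return -1;                /* kernel returns -EFAULT */

        memcpy(dst, kdata, tocopy * sizeof(*kdata));
        return 0;
}

int main(void)
{
        struct cap_data out[2];
        printf("%d\n", copy_caps(out, 2));   /*  0 */
        printf("%d\n", copy_caps(out, 9));   /* -1: would have overread */
        return 0;
}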
85931diff --git a/kernel/cgroup.c b/kernel/cgroup.c
85932index 0c753dd..dd7d3d6 100644
85933--- a/kernel/cgroup.c
85934+++ b/kernel/cgroup.c
85935@@ -5372,7 +5372,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
85936 struct css_set *cset = link->cset;
85937 struct task_struct *task;
85938 int count = 0;
85939- seq_printf(seq, "css_set %p\n", cset);
85940+ seq_printf(seq, "css_set %pK\n", cset);
85941 list_for_each_entry(task, &cset->tasks, cg_list) {
85942 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
85943 seq_puts(seq, " ...\n");
85944diff --git a/kernel/compat.c b/kernel/compat.c
85945index 0a09e48..b46b3d78 100644
85946--- a/kernel/compat.c
85947+++ b/kernel/compat.c
85948@@ -13,6 +13,7 @@
85949
85950 #include <linux/linkage.h>
85951 #include <linux/compat.h>
85952+#include <linux/module.h>
85953 #include <linux/errno.h>
85954 #include <linux/time.h>
85955 #include <linux/signal.h>
85956@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
85957 mm_segment_t oldfs;
85958 long ret;
85959
85960- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
85961+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
85962 oldfs = get_fs();
85963 set_fs(KERNEL_DS);
85964 ret = hrtimer_nanosleep_restart(restart);
85965@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
85966 oldfs = get_fs();
85967 set_fs(KERNEL_DS);
85968 ret = hrtimer_nanosleep(&tu,
85969- rmtp ? (struct timespec __user *)&rmt : NULL,
85970+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
85971 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
85972 set_fs(oldfs);
85973
85974@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
85975 mm_segment_t old_fs = get_fs();
85976
85977 set_fs(KERNEL_DS);
85978- ret = sys_sigpending((old_sigset_t __user *) &s);
85979+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
85980 set_fs(old_fs);
85981 if (ret == 0)
85982 ret = put_user(s, set);
85983@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
85984 mm_segment_t old_fs = get_fs();
85985
85986 set_fs(KERNEL_DS);
85987- ret = sys_old_getrlimit(resource, &r);
85988+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
85989 set_fs(old_fs);
85990
85991 if (!ret) {
85992@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
85993 set_fs (KERNEL_DS);
85994 ret = sys_wait4(pid,
85995 (stat_addr ?
85996- (unsigned int __user *) &status : NULL),
85997- options, (struct rusage __user *) &r);
85998+ (unsigned int __force_user *) &status : NULL),
85999+ options, (struct rusage __force_user *) &r);
86000 set_fs (old_fs);
86001
86002 if (ret > 0) {
86003@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
86004 memset(&info, 0, sizeof(info));
86005
86006 set_fs(KERNEL_DS);
86007- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
86008- uru ? (struct rusage __user *)&ru : NULL);
86009+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
86010+ uru ? (struct rusage __force_user *)&ru : NULL);
86011 set_fs(old_fs);
86012
86013 if ((ret < 0) || (info.si_signo == 0))
86014@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
86015 oldfs = get_fs();
86016 set_fs(KERNEL_DS);
86017 err = sys_timer_settime(timer_id, flags,
86018- (struct itimerspec __user *) &newts,
86019- (struct itimerspec __user *) &oldts);
86020+ (struct itimerspec __force_user *) &newts,
86021+ (struct itimerspec __force_user *) &oldts);
86022 set_fs(oldfs);
86023 if (!err && old && put_compat_itimerspec(old, &oldts))
86024 return -EFAULT;
86025@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
86026 oldfs = get_fs();
86027 set_fs(KERNEL_DS);
86028 err = sys_timer_gettime(timer_id,
86029- (struct itimerspec __user *) &ts);
86030+ (struct itimerspec __force_user *) &ts);
86031 set_fs(oldfs);
86032 if (!err && put_compat_itimerspec(setting, &ts))
86033 return -EFAULT;
86034@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
86035 oldfs = get_fs();
86036 set_fs(KERNEL_DS);
86037 err = sys_clock_settime(which_clock,
86038- (struct timespec __user *) &ts);
86039+ (struct timespec __force_user *) &ts);
86040 set_fs(oldfs);
86041 return err;
86042 }
86043@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
86044 oldfs = get_fs();
86045 set_fs(KERNEL_DS);
86046 err = sys_clock_gettime(which_clock,
86047- (struct timespec __user *) &ts);
86048+ (struct timespec __force_user *) &ts);
86049 set_fs(oldfs);
86050 if (!err && put_compat_timespec(&ts, tp))
86051 return -EFAULT;
86052@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
86053
86054 oldfs = get_fs();
86055 set_fs(KERNEL_DS);
86056- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
86057+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
86058 set_fs(oldfs);
86059
86060 err = compat_put_timex(utp, &txc);
86061@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
86062 oldfs = get_fs();
86063 set_fs(KERNEL_DS);
86064 err = sys_clock_getres(which_clock,
86065- (struct timespec __user *) &ts);
86066+ (struct timespec __force_user *) &ts);
86067 set_fs(oldfs);
86068 if (!err && tp && put_compat_timespec(&ts, tp))
86069 return -EFAULT;
86070@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
86071 long err;
86072 mm_segment_t oldfs;
86073 struct timespec tu;
86074- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
86075+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
86076
86077- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
86078+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
86079 oldfs = get_fs();
86080 set_fs(KERNEL_DS);
86081 err = clock_nanosleep_restart(restart);
86082@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
86083 oldfs = get_fs();
86084 set_fs(KERNEL_DS);
86085 err = sys_clock_nanosleep(which_clock, flags,
86086- (struct timespec __user *) &in,
86087- (struct timespec __user *) &out);
86088+ (struct timespec __force_user *) &in,
86089+ (struct timespec __force_user *) &out);
86090 set_fs(oldfs);
86091
86092 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
86093@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
86094 mm_segment_t old_fs = get_fs();
86095
86096 set_fs(KERNEL_DS);
86097- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
86098+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
86099 set_fs(old_fs);
86100 if (put_compat_timespec(&t, interval))
86101 return -EFAULT;
86102diff --git a/kernel/configs.c b/kernel/configs.c
86103index c18b1f1..b9a0132 100644
86104--- a/kernel/configs.c
86105+++ b/kernel/configs.c
86106@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
86107 struct proc_dir_entry *entry;
86108
86109 /* create the current config file */
86110+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
86111+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
86112+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
86113+ &ikconfig_file_ops);
86114+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86115+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
86116+ &ikconfig_file_ops);
86117+#endif
86118+#else
86119 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
86120 &ikconfig_file_ops);
86121+#endif
86122+
86123 if (!entry)
86124 return -ENOMEM;
86125
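Under the GRKERNSEC_PROC_* options, /proc/config.gz is created with progressively tighter modes — 0400 for user-only or HIDESYM, 0440 for usergroup — instead of the default world-readable 0444. The mode-selection ladder, reduced to a compilable sketch (the DEMO_ option macros are placeholders for the Kconfig symbols, not real kernel macros):

#include <stdio.h>
#include <sys/stat.h>

/* toggle these to mimic the Kconfig options (placeholders only) */
#define DEMO_PROC_USER 1
/* #define DEMO_PROC_USERGROUP 1 */

int main(void)
{
#if defined(DEMO_PROC_USER)
        mode_t mode = S_IRUSR;                      /* 0400: owner only */
#elif defined(DEMO_PROC_USERGROUP)
        mode_t mode = S_IRUSR | S_IRGRP;            /* 0440: owner + group */
#else
        mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;  /* 0444: world-readable */
#endif
        printf("config.gz mode: %04o\n", (unsigned)mode);
        return 0;
}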
86126diff --git a/kernel/cred.c b/kernel/cred.c
86127index e0573a4..20fb164 100644
86128--- a/kernel/cred.c
86129+++ b/kernel/cred.c
86130@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
86131 validate_creds(cred);
86132 alter_cred_subscribers(cred, -1);
86133 put_cred(cred);
86134+
86135+#ifdef CONFIG_GRKERNSEC_SETXID
86136+ cred = (struct cred *) tsk->delayed_cred;
86137+ if (cred != NULL) {
86138+ tsk->delayed_cred = NULL;
86139+ validate_creds(cred);
86140+ alter_cred_subscribers(cred, -1);
86141+ put_cred(cred);
86142+ }
86143+#endif
86144 }
86145
86146 /**
86147@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
86148 * Always returns 0 thus allowing this function to be tail-called at the end
86149 * of, say, sys_setgid().
86150 */
86151-int commit_creds(struct cred *new)
86152+static int __commit_creds(struct cred *new)
86153 {
86154 struct task_struct *task = current;
86155 const struct cred *old = task->real_cred;
86156@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
86157
86158 get_cred(new); /* we will require a ref for the subj creds too */
86159
86160+ gr_set_role_label(task, new->uid, new->gid);
86161+
86162 /* dumpability changes */
86163 if (!uid_eq(old->euid, new->euid) ||
86164 !gid_eq(old->egid, new->egid) ||
86165@@ -479,6 +491,108 @@ int commit_creds(struct cred *new)
86166 put_cred(old);
86167 return 0;
86168 }
86169+#ifdef CONFIG_GRKERNSEC_SETXID
86170+extern int set_user(struct cred *new);
86171+
86172+void gr_delayed_cred_worker(void)
86173+{
86174+ const struct cred *new = current->delayed_cred;
86175+ struct cred *ncred;
86176+
86177+ current->delayed_cred = NULL;
86178+
86179+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
86180+ // from doing get_cred on it when queueing this
86181+ put_cred(new);
86182+ return;
86183+ } else if (new == NULL)
86184+ return;
86185+
86186+ ncred = prepare_creds();
86187+ if (!ncred)
86188+ goto die;
86189+ // uids
86190+ ncred->uid = new->uid;
86191+ ncred->euid = new->euid;
86192+ ncred->suid = new->suid;
86193+ ncred->fsuid = new->fsuid;
86194+ // gids
86195+ ncred->gid = new->gid;
86196+ ncred->egid = new->egid;
86197+ ncred->sgid = new->sgid;
86198+ ncred->fsgid = new->fsgid;
86199+ // groups
86200+ if (set_groups(ncred, new->group_info) < 0) {
86201+ abort_creds(ncred);
86202+ goto die;
86203+ }
86204+ // caps
86205+ ncred->securebits = new->securebits;
86206+ ncred->cap_inheritable = new->cap_inheritable;
86207+ ncred->cap_permitted = new->cap_permitted;
86208+ ncred->cap_effective = new->cap_effective;
86209+ ncred->cap_bset = new->cap_bset;
86210+
86211+ if (set_user(ncred)) {
86212+ abort_creds(ncred);
86213+ goto die;
86214+ }
86215+
86216+ // from doing get_cred on it when queueing this
86217+ put_cred(new);
86218+
86219+ __commit_creds(ncred);
86220+ return;
86221+die:
86222+ // from doing get_cred on it when queueing this
86223+ put_cred(new);
86224+ do_group_exit(SIGKILL);
86225+}
86226+#endif
86227+
86228+int commit_creds(struct cred *new)
86229+{
86230+#ifdef CONFIG_GRKERNSEC_SETXID
86231+ int ret;
86232+ int schedule_it = 0;
86233+ struct task_struct *t;
86234+ unsigned oldsecurebits = current_cred()->securebits;
86235+
86236+ /* we won't get called with tasklist_lock held for writing
86237+ and interrupts disabled as the cred struct in that case is
86238+ init_cred
86239+ */
86240+ if (grsec_enable_setxid && !current_is_single_threaded() &&
86241+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
86242+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
86243+ schedule_it = 1;
86244+ }
86245+ ret = __commit_creds(new);
86246+ if (schedule_it) {
86247+ rcu_read_lock();
86248+ read_lock(&tasklist_lock);
86249+ for (t = next_thread(current); t != current;
86250+ t = next_thread(t)) {
86251+ /* we'll check if the thread has uid 0 in
86252+ * the delayed worker routine
86253+ */
86254+ if (task_securebits(t) == oldsecurebits &&
86255+ t->delayed_cred == NULL) {
86256+ t->delayed_cred = get_cred(new);
86257+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
86258+ set_tsk_need_resched(t);
86259+ }
86260+ }
86261+ read_unlock(&tasklist_lock);
86262+ rcu_read_unlock();
86263+ }
86264+
86265+ return ret;
86266+#else
86267+ return __commit_creds(new);
86268+#endif
86269+}
86270+
86271 EXPORT_SYMBOL(commit_creds);
86272
86273 /**
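The cred.c diff is the SETXID machinery: commit_creds() becomes a static __commit_creds(), and the exported wrapper, when a root process drops privilege in a multithreaded program, queues the new creds on every sibling thread (delayed_cred plus TIF_GRSEC_SETXID) so that gr_delayed_cred_worker() applies the drop per thread at a safe reschedule point. A much-simplified pthread analogue of "flag the siblings, let each apply the change itself" — polling stands in for the TIF flag, and all names are this sketch's own (build with cc demo.c -lpthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int pending_uid = -1;   /* -1: nothing queued (cf. delayed_cred) */

static void *worker(void *arg)
{
        int my_uid = 0;
        for (;;) {
                int uid = atomic_load(&pending_uid);
                if (uid >= 0 && my_uid != uid) {
                        my_uid = uid;     /* apply the queued drop ourselves */
                        printf("thread %ld dropped to uid %d\n",
                               (long)(intptr_t)arg, my_uid);
                        return NULL;
                }
                usleep(1000);             /* "safe point": poll instead of TIF flag */
        }
}

int main(void)
{
        pthread_t t[2];
        for (long i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);

        atomic_store(&pending_uid, 1000); /* cf. commit_creds() queueing creds */
        for (int i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}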
86274diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
86275index 334b398..9145fb1 100644
86276--- a/kernel/debug/debug_core.c
86277+++ b/kernel/debug/debug_core.c
86278@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
86279 */
86280 static atomic_t masters_in_kgdb;
86281 static atomic_t slaves_in_kgdb;
86282-static atomic_t kgdb_break_tasklet_var;
86283+static atomic_unchecked_t kgdb_break_tasklet_var;
86284 atomic_t kgdb_setting_breakpoint;
86285
86286 struct task_struct *kgdb_usethread;
86287@@ -133,7 +133,7 @@ int kgdb_single_step;
86288 static pid_t kgdb_sstep_pid;
86289
86290 /* to keep track of the CPU which is doing the single stepping*/
86291-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
86292+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
86293
86294 /*
86295 * If you are debugging a problem where roundup (the collection of
86296@@ -541,7 +541,7 @@ return_normal:
86297 * kernel will only try for the value of sstep_tries before
86298 * giving up and continuing on.
86299 */
86300- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
86301+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
86302 (kgdb_info[cpu].task &&
86303 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
86304 atomic_set(&kgdb_active, -1);
86305@@ -639,8 +639,8 @@ cpu_master_loop:
86306 }
86307
86308 kgdb_restore:
86309- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
86310- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
86311+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
86312+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
86313 if (kgdb_info[sstep_cpu].task)
86314 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
86315 else
86316@@ -917,18 +917,18 @@ static void kgdb_unregister_callbacks(void)
86317 static void kgdb_tasklet_bpt(unsigned long ing)
86318 {
86319 kgdb_breakpoint();
86320- atomic_set(&kgdb_break_tasklet_var, 0);
86321+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
86322 }
86323
86324 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
86325
86326 void kgdb_schedule_breakpoint(void)
86327 {
86328- if (atomic_read(&kgdb_break_tasklet_var) ||
86329+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
86330 atomic_read(&kgdb_active) != -1 ||
86331 atomic_read(&kgdb_setting_breakpoint))
86332 return;
86333- atomic_inc(&kgdb_break_tasklet_var);
86334+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
86335 tasklet_schedule(&kgdb_tasklet_breakpoint);
86336 }
86337 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
86338diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
86339index 0b097c8..11dd5c5 100644
86340--- a/kernel/debug/kdb/kdb_main.c
86341+++ b/kernel/debug/kdb/kdb_main.c
86342@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
86343 continue;
86344
86345 kdb_printf("%-20s%8u 0x%p ", mod->name,
86346- mod->core_size, (void *)mod);
86347+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
86348 #ifdef CONFIG_MODULE_UNLOAD
86349 kdb_printf("%4ld ", module_refcount(mod));
86350 #endif
86351@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
86352 kdb_printf(" (Loading)");
86353 else
86354 kdb_printf(" (Live)");
86355- kdb_printf(" 0x%p", mod->module_core);
86356+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
86357
86358 #ifdef CONFIG_MODULE_UNLOAD
86359 {
86360diff --git a/kernel/events/core.c b/kernel/events/core.c
86361index fa0b2d4..67a1c7a 100644
86362--- a/kernel/events/core.c
86363+++ b/kernel/events/core.c
86364@@ -158,8 +158,15 @@ static struct srcu_struct pmus_srcu;
86365 * 0 - disallow raw tracepoint access for unpriv
86366 * 1 - disallow cpu events for unpriv
86367 * 2 - disallow kernel profiling for unpriv
86368+ * 3 - disallow all unpriv perf event use
86369 */
86370-int sysctl_perf_event_paranoid __read_mostly = 1;
86371+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
86372+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
86373+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
86374+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
86375+#else
86376+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
86377+#endif
86378
86379 /* Minimum for 512 kiB + 1 user control page */
86380 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
86381@@ -185,7 +192,7 @@ void update_perf_cpu_limits(void)
86382
86383 tmp *= sysctl_perf_cpu_time_max_percent;
86384 do_div(tmp, 100);
86385- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
86386+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
86387 }
86388
86389 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
86390@@ -272,7 +279,7 @@ void perf_sample_event_took(u64 sample_len_ns)
86391 update_perf_cpu_limits();
86392 }
86393
86394-static atomic64_t perf_event_id;
86395+static atomic64_unchecked_t perf_event_id;
86396
86397 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
86398 enum event_type_t event_type);
86399@@ -2986,7 +2993,7 @@ static void __perf_event_read(void *info)
86400
86401 static inline u64 perf_event_count(struct perf_event *event)
86402 {
86403- return local64_read(&event->count) + atomic64_read(&event->child_count);
86404+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
86405 }
86406
86407 static u64 perf_event_read(struct perf_event *event)
86408@@ -3354,9 +3361,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
86409 mutex_lock(&event->child_mutex);
86410 total += perf_event_read(event);
86411 *enabled += event->total_time_enabled +
86412- atomic64_read(&event->child_total_time_enabled);
86413+ atomic64_read_unchecked(&event->child_total_time_enabled);
86414 *running += event->total_time_running +
86415- atomic64_read(&event->child_total_time_running);
86416+ atomic64_read_unchecked(&event->child_total_time_running);
86417
86418 list_for_each_entry(child, &event->child_list, child_list) {
86419 total += perf_event_read(child);
86420@@ -3785,10 +3792,10 @@ void perf_event_update_userpage(struct perf_event *event)
86421 userpg->offset -= local64_read(&event->hw.prev_count);
86422
86423 userpg->time_enabled = enabled +
86424- atomic64_read(&event->child_total_time_enabled);
86425+ atomic64_read_unchecked(&event->child_total_time_enabled);
86426
86427 userpg->time_running = running +
86428- atomic64_read(&event->child_total_time_running);
86429+ atomic64_read_unchecked(&event->child_total_time_running);
86430
86431 arch_perf_update_userpage(userpg, now);
86432
86433@@ -4339,7 +4346,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
86434
86435 /* Data. */
86436 sp = perf_user_stack_pointer(regs);
86437- rem = __output_copy_user(handle, (void *) sp, dump_size);
86438+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
86439 dyn_size = dump_size - rem;
86440
86441 perf_output_skip(handle, rem);
86442@@ -4430,11 +4437,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
86443 values[n++] = perf_event_count(event);
86444 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
86445 values[n++] = enabled +
86446- atomic64_read(&event->child_total_time_enabled);
86447+ atomic64_read_unchecked(&event->child_total_time_enabled);
86448 }
86449 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
86450 values[n++] = running +
86451- atomic64_read(&event->child_total_time_running);
86452+ atomic64_read_unchecked(&event->child_total_time_running);
86453 }
86454 if (read_format & PERF_FORMAT_ID)
86455 values[n++] = primary_event_id(event);
86456@@ -6704,7 +6711,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
86457 event->parent = parent_event;
86458
86459 event->ns = get_pid_ns(task_active_pid_ns(current));
86460- event->id = atomic64_inc_return(&perf_event_id);
86461+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
86462
86463 event->state = PERF_EVENT_STATE_INACTIVE;
86464
86465@@ -7004,6 +7011,11 @@ SYSCALL_DEFINE5(perf_event_open,
86466 if (flags & ~PERF_FLAG_ALL)
86467 return -EINVAL;
86468
86469+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
86470+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
86471+ return -EACCES;
86472+#endif
86473+
86474 err = perf_copy_attr(attr_uptr, &attr);
86475 if (err)
86476 return err;
86477@@ -7339,10 +7351,10 @@ static void sync_child_event(struct perf_event *child_event,
86478 /*
86479 * Add back the child's count to the parent's count:
86480 */
86481- atomic64_add(child_val, &parent_event->child_count);
86482- atomic64_add(child_event->total_time_enabled,
86483+ atomic64_add_unchecked(child_val, &parent_event->child_count);
86484+ atomic64_add_unchecked(child_event->total_time_enabled,
86485 &parent_event->child_total_time_enabled);
86486- atomic64_add(child_event->total_time_running,
86487+ atomic64_add_unchecked(child_event->total_time_running,
86488 &parent_event->child_total_time_running);
86489
86490 /*
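PERF_HARDEN introduces paranoia level 3 ("disallow all unpriv perf event use") and makes sys_perf_event_open() fail early with -EACCES unless the caller has CAP_SYS_ADMIN; the rest of the events/core.c diff is the same unchecked-atomic conversion for the child-count statistics. A small runnable probe of the knob as seen from userspace (the sysctl path exists on stock kernels too):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f || fscanf(f, "%d", &level) != 1) {
                perror("perf_event_paranoid");
                return 1;
        }
        fclose(f);

        /* 3 is the grsecurity PERF_HARDEN default added by this hunk */
        printf("paranoia level %d%s\n", level,
               level >= 3 ? " (unprivileged perf fully disabled)" : "");
        return 0;
}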
86491diff --git a/kernel/events/internal.h b/kernel/events/internal.h
86492index 569b2187..19940d9 100644
86493--- a/kernel/events/internal.h
86494+++ b/kernel/events/internal.h
86495@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
86496 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
86497 }
86498
86499-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
86500+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
86501 static inline unsigned long \
86502 func_name(struct perf_output_handle *handle, \
86503- const void *buf, unsigned long len) \
86504+ const void user *buf, unsigned long len) \
86505 { \
86506 unsigned long size, written; \
86507 \
86508@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
86509 return 0;
86510 }
86511
86512-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
86513+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
86514
86515 static inline unsigned long
86516 memcpy_skip(void *dst, const void *src, unsigned long n)
86517@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
86518 return 0;
86519 }
86520
86521-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
86522+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
86523
86524 #ifndef arch_perf_out_copy_user
86525 #define arch_perf_out_copy_user arch_perf_out_copy_user
86526@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
86527 }
86528 #endif
86529
86530-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
86531+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
86532
86533 /* Callchain handling */
86534 extern struct perf_callchain_entry *
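DEFINE_OUTPUT_COPY gains a third macro parameter so the generated copy helpers carry the sparse __user qualifier only on the variant that actually reads userspace, instead of laundering the annotation away with casts. The token-pasting trick in miniature, with a stand-in qualifier macro (demo_user is this sketch's invention; sparse's real __user expands to an address-space attribute):

#include <stdio.h>
#include <string.h>

/* stand-in for sparse's __user: expands to nothing under a real compiler */
#define demo_user

/* generate a copy helper whose source pointer carries 'qual' */
#define DEFINE_COPY(name, qual)                                        \
static unsigned long name(void *dst, const qual void *src,             \
                          unsigned long len)                           \
{                                                                      \
        memcpy(dst, (const void *)src, len);                           \
        return 0; /* 0 bytes left uncopied */                          \
}

DEFINE_COPY(copy_kernel, )          /* kernel-to-kernel: unqualified */
DEFINE_COPY(copy_user, demo_user)   /* "userspace" source: qualified */

int main(void)
{
        char dst[6];
        copy_kernel(dst, "hola!", 6);
        copy_user(dst, "hello", 6);
        printf("%s\n", dst);
        return 0;
}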
86535diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
86536index 307d87c..6466cbe 100644
86537--- a/kernel/events/uprobes.c
86538+++ b/kernel/events/uprobes.c
86539@@ -1666,7 +1666,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
86540 {
86541 struct page *page;
86542 uprobe_opcode_t opcode;
86543- int result;
86544+ long result;
86545
86546 pagefault_disable();
86547 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
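__copy_from_user_inatomic() reports the number of bytes left uncopied as an unsigned long; storing that in an int is what the one-line uprobes.c change fixes. The practical concern is type hygiene (and consistency with the size-overflow instrumentation) rather than a live truncation on this particular call, but the general hazard of narrowing such a result is easy to show on the usual LP64 ABI:

#include <stdio.h>

/* pretend a copy routine reported this many uncopied bytes */
static unsigned long fake_copy_result(void)
{
        return 0x100000000UL;   /* 2^32: nonzero, i.e. the copy failed */
}

int main(void)
{
        int  truncated = fake_copy_result();  /* the old 'int result;' shape */
        long widened   = fake_copy_result();  /* the patched 'long result;' */

        /* the int sees 0 and would treat the failed copy as a success */
        printf("int=%d long=%ld\n", truncated, widened);
        return 0;
}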
86548diff --git a/kernel/exit.c b/kernel/exit.c
86549index 81b3d67..ef189a4 100644
86550--- a/kernel/exit.c
86551+++ b/kernel/exit.c
86552@@ -173,6 +173,10 @@ void release_task(struct task_struct * p)
86553 struct task_struct *leader;
86554 int zap_leader;
86555 repeat:
86556+#ifdef CONFIG_NET
86557+ gr_del_task_from_ip_table(p);
86558+#endif
86559+
86560 /* don't need to get the RCU readlock here - the process is dead and
86561 * can't be modifying its own credentials. But shut RCU-lockdep up */
86562 rcu_read_lock();
86563@@ -330,7 +334,7 @@ int allow_signal(int sig)
86564 * know it'll be handled, so that they don't get converted to
86565 * SIGKILL or just silently dropped.
86566 */
86567- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
86568+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
86569 recalc_sigpending();
86570 spin_unlock_irq(&current->sighand->siglock);
86571 return 0;
86572@@ -706,6 +710,8 @@ void do_exit(long code)
86573 struct task_struct *tsk = current;
86574 int group_dead;
86575
86576+ set_fs(USER_DS);
86577+
86578 profile_task_exit(tsk);
86579
86580 WARN_ON(blk_needs_flush_plug(tsk));
86581@@ -722,7 +728,6 @@ void do_exit(long code)
86582 * mm_release()->clear_child_tid() from writing to a user-controlled
86583 * kernel address.
86584 */
86585- set_fs(USER_DS);
86586
86587 ptrace_event(PTRACE_EVENT_EXIT, code);
86588
86589@@ -781,6 +786,9 @@ void do_exit(long code)
86590 tsk->exit_code = code;
86591 taskstats_exit(tsk, group_dead);
86592
86593+ gr_acl_handle_psacct(tsk, code);
86594+ gr_acl_handle_exit();
86595+
86596 exit_mm(tsk);
86597
86598 if (group_dead)
86599@@ -900,7 +908,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
86600 * Take down every thread in the group. This is called by fatal signals
86601 * as well as by sys_exit_group (below).
86602 */
86603-void
86604+__noreturn void
86605 do_group_exit(int exit_code)
86606 {
86607 struct signal_struct *sig = current->signal;
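Two changes in exit.c: do_exit() now resets the address limit with set_fs(USER_DS) at the very top, before profiling and the gr_ hooks run, rather than just before the ptrace exit event; and do_group_exit() is annotated __noreturn so callers and the compiler know it never comes back. The annotation half in standard C11 spelling, as a runnable sketch:

#include <stdio.h>
#include <stdlib.h>

/* C11 spelling of the kernel's __noreturn attribute */
static _Noreturn void die(int code)
{
        fprintf(stderr, "exiting with %d\n", code);
        exit(code);
        /* no return path: the compiler can warn if one existed */
}

int main(void)
{
        if (1)
                die(0);
        /* unreachable -- _Noreturn lets the compiler prove it */
}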
86608diff --git a/kernel/fork.c b/kernel/fork.c
86609index a17621c..d9e4b37 100644
86610--- a/kernel/fork.c
86611+++ b/kernel/fork.c
86612@@ -137,6 +137,18 @@ void __weak arch_release_thread_info(struct thread_info *ti)
86613 {
86614 }
86615
86616+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86617+static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
86618+ int node)
86619+{
86620+ return vmalloc_stack(node);
86621+}
86622+
86623+static inline void free_thread_info(struct thread_info *ti)
86624+{
86625+ vfree(ti);
86626+}
86627+#else
86628 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
86629
86630 /*
86631@@ -179,6 +191,7 @@ void thread_info_cache_init(void)
86632 }
86633 # endif
86634 #endif
86635+#endif
86636
86637 /* SLAB cache for signal_struct structures (tsk->signal) */
86638 static struct kmem_cache *signal_cachep;
86639@@ -200,9 +213,11 @@ static struct kmem_cache *mm_cachep;
86640
86641 static void account_kernel_stack(struct thread_info *ti, int account)
86642 {
86643+#ifndef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86644 struct zone *zone = page_zone(virt_to_page(ti));
86645
86646 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
86647+#endif
86648 }
86649
86650 void free_task(struct task_struct *tsk)
86651@@ -319,7 +334,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
86652 *stackend = STACK_END_MAGIC; /* for overflow detection */
86653
86654 #ifdef CONFIG_CC_STACKPROTECTOR
86655- tsk->stack_canary = get_random_int();
86656+ tsk->stack_canary = pax_get_random_long();
86657 #endif
86658
86659 /*
86660@@ -345,12 +360,80 @@ free_tsk:
86661 }
86662
86663 #ifdef CONFIG_MMU
86664-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
86665+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
86666+{
86667+ struct vm_area_struct *tmp;
86668+ unsigned long charge;
86669+ struct file *file;
86670+ int retval;
86671+
86672+ charge = 0;
86673+ if (mpnt->vm_flags & VM_ACCOUNT) {
86674+ unsigned long len = vma_pages(mpnt);
86675+
86676+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
86677+ goto fail_nomem;
86678+ charge = len;
86679+ }
86680+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86681+ if (!tmp)
86682+ goto fail_nomem;
86683+ *tmp = *mpnt;
86684+ tmp->vm_mm = mm;
86685+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
86686+ retval = vma_dup_policy(mpnt, tmp);
86687+ if (retval)
86688+ goto fail_nomem_policy;
86689+ if (anon_vma_fork(tmp, mpnt))
86690+ goto fail_nomem_anon_vma_fork;
86691+ tmp->vm_flags &= ~VM_LOCKED;
86692+ tmp->vm_next = tmp->vm_prev = NULL;
86693+ tmp->vm_mirror = NULL;
86694+ file = tmp->vm_file;
86695+ if (file) {
86696+ struct inode *inode = file_inode(file);
86697+ struct address_space *mapping = file->f_mapping;
86698+
86699+ get_file(file);
86700+ if (tmp->vm_flags & VM_DENYWRITE)
86701+ atomic_dec(&inode->i_writecount);
86702+ mutex_lock(&mapping->i_mmap_mutex);
86703+ if (tmp->vm_flags & VM_SHARED)
86704+ mapping->i_mmap_writable++;
86705+ flush_dcache_mmap_lock(mapping);
86706+ /* insert tmp into the share list, just after mpnt */
86707+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
86708+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
86709+ else
86710+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
86711+ flush_dcache_mmap_unlock(mapping);
86712+ mutex_unlock(&mapping->i_mmap_mutex);
86713+ }
86714+
86715+ /*
86716+ * Clear hugetlb-related page reserves for children. This only
86717+ * affects MAP_PRIVATE mappings. Faults generated by the child
86718+ * are not guaranteed to succeed, even if read-only
86719+ */
86720+ if (is_vm_hugetlb_page(tmp))
86721+ reset_vma_resv_huge_pages(tmp);
86722+
86723+ return tmp;
86724+
86725+fail_nomem_anon_vma_fork:
86726+ mpol_put(vma_policy(tmp));
86727+fail_nomem_policy:
86728+ kmem_cache_free(vm_area_cachep, tmp);
86729+fail_nomem:
86730+ vm_unacct_memory(charge);
86731+ return NULL;
86732+}
86733+
86734+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
86735 {
86736 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
86737 struct rb_node **rb_link, *rb_parent;
86738 int retval;
86739- unsigned long charge;
86740
86741 uprobe_start_dup_mmap();
86742 down_write(&oldmm->mmap_sem);
86743@@ -379,55 +462,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
86744
86745 prev = NULL;
86746 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
86747- struct file *file;
86748-
86749 if (mpnt->vm_flags & VM_DONTCOPY) {
86750 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
86751 -vma_pages(mpnt));
86752 continue;
86753 }
86754- charge = 0;
86755- if (mpnt->vm_flags & VM_ACCOUNT) {
86756- unsigned long len = vma_pages(mpnt);
86757-
86758- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
86759- goto fail_nomem;
86760- charge = len;
86761- }
86762- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
86763- if (!tmp)
86764- goto fail_nomem;
86765- *tmp = *mpnt;
86766- INIT_LIST_HEAD(&tmp->anon_vma_chain);
86767- retval = vma_dup_policy(mpnt, tmp);
86768- if (retval)
86769- goto fail_nomem_policy;
86770- tmp->vm_mm = mm;
86771- if (anon_vma_fork(tmp, mpnt))
86772- goto fail_nomem_anon_vma_fork;
86773- tmp->vm_flags &= ~VM_LOCKED;
86774- tmp->vm_next = tmp->vm_prev = NULL;
86775- file = tmp->vm_file;
86776- if (file) {
86777- struct inode *inode = file_inode(file);
86778- struct address_space *mapping = file->f_mapping;
86779-
86780- get_file(file);
86781- if (tmp->vm_flags & VM_DENYWRITE)
86782- atomic_dec(&inode->i_writecount);
86783- mutex_lock(&mapping->i_mmap_mutex);
86784- if (tmp->vm_flags & VM_SHARED)
86785- mapping->i_mmap_writable++;
86786- flush_dcache_mmap_lock(mapping);
86787- /* insert tmp into the share list, just after mpnt */
86788- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
86789- vma_nonlinear_insert(tmp,
86790- &mapping->i_mmap_nonlinear);
86791- else
86792- vma_interval_tree_insert_after(tmp, mpnt,
86793- &mapping->i_mmap);
86794- flush_dcache_mmap_unlock(mapping);
86795- mutex_unlock(&mapping->i_mmap_mutex);
86796+ tmp = dup_vma(mm, oldmm, mpnt);
86797+ if (!tmp) {
86798+ retval = -ENOMEM;
86799+ goto out;
86800 }
86801
86802 /*
86803@@ -459,6 +502,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
86804 if (retval)
86805 goto out;
86806 }
86807+
86808+#ifdef CONFIG_PAX_SEGMEXEC
86809+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
86810+ struct vm_area_struct *mpnt_m;
86811+
86812+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
86813+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
86814+
86815+ if (!mpnt->vm_mirror)
86816+ continue;
86817+
86818+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
86819+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
86820+ mpnt->vm_mirror = mpnt_m;
86821+ } else {
86822+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
86823+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
86824+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
86825+ mpnt->vm_mirror->vm_mirror = mpnt;
86826+ }
86827+ }
86828+ BUG_ON(mpnt_m);
86829+ }
86830+#endif
86831+
86832 /* a new mm has just been created */
86833 arch_dup_mmap(oldmm, mm);
86834 retval = 0;
86835@@ -468,14 +536,6 @@ out:
86836 up_write(&oldmm->mmap_sem);
86837 uprobe_end_dup_mmap();
86838 return retval;
86839-fail_nomem_anon_vma_fork:
86840- mpol_put(vma_policy(tmp));
86841-fail_nomem_policy:
86842- kmem_cache_free(vm_area_cachep, tmp);
86843-fail_nomem:
86844- retval = -ENOMEM;
86845- vm_unacct_memory(charge);
86846- goto out;
86847 }
86848
86849 static inline int mm_alloc_pgd(struct mm_struct *mm)
86850@@ -689,8 +749,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
86851 return ERR_PTR(err);
86852
86853 mm = get_task_mm(task);
86854- if (mm && mm != current->mm &&
86855- !ptrace_may_access(task, mode)) {
86856+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
86857+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
86858 mmput(mm);
86859 mm = ERR_PTR(-EACCES);
86860 }
86861@@ -906,13 +966,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
86862 spin_unlock(&fs->lock);
86863 return -EAGAIN;
86864 }
86865- fs->users++;
86866+ atomic_inc(&fs->users);
86867 spin_unlock(&fs->lock);
86868 return 0;
86869 }
86870 tsk->fs = copy_fs_struct(fs);
86871 if (!tsk->fs)
86872 return -ENOMEM;
86873+ /* Carry through gr_chroot_dentry and is_chrooted instead
86874+ of recomputing it here. Already copied when the task struct
86875+ is duplicated. This allows pivot_root to not be treated as
86876+ a chroot
86877+ */
86878+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
86879+
86880 return 0;
86881 }
86882
86883@@ -1130,7 +1197,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
86884 * parts of the process environment (as per the clone
86885 * flags). The actual kick-off is left to the caller.
86886 */
86887-static struct task_struct *copy_process(unsigned long clone_flags,
86888+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
86889 unsigned long stack_start,
86890 unsigned long stack_size,
86891 int __user *child_tidptr,
86892@@ -1202,6 +1269,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
86893 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
86894 #endif
86895 retval = -EAGAIN;
86896+
86897+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
86898+
86899 if (atomic_read(&p->real_cred->user->processes) >=
86900 task_rlimit(p, RLIMIT_NPROC)) {
86901 if (p->real_cred->user != INIT_USER &&
86902@@ -1449,6 +1519,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
86903 goto bad_fork_free_pid;
86904 }
86905
86906+ /* synchronizes with gr_set_acls()
86907+ we need to call this past the point of no return for fork()
86908+ */
86909+ gr_copy_label(p);
86910+
86911 if (likely(p->pid)) {
86912 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
86913
86914@@ -1537,6 +1612,8 @@ bad_fork_cleanup_count:
86915 bad_fork_free:
86916 free_task(p);
86917 fork_out:
86918+ gr_log_forkfail(retval);
86919+
86920 return ERR_PTR(retval);
86921 }
86922
86923@@ -1598,6 +1675,7 @@ long do_fork(unsigned long clone_flags,
86924
86925 p = copy_process(clone_flags, stack_start, stack_size,
86926 child_tidptr, NULL, trace);
86927+ add_latent_entropy();
86928 /*
86929 * Do this prior waking up the new thread - the thread pointer
86930 * might get invalid after that point, if the thread exits quickly.
86931@@ -1612,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
86932 if (clone_flags & CLONE_PARENT_SETTID)
86933 put_user(nr, parent_tidptr);
86934
86935+ gr_handle_brute_check();
86936+
86937 if (clone_flags & CLONE_VFORK) {
86938 p->vfork_done = &vfork;
86939 init_completion(&vfork);
86940@@ -1728,7 +1808,7 @@ void __init proc_caches_init(void)
86941 mm_cachep = kmem_cache_create("mm_struct",
86942 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
86943 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
86944- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
86945+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
86946 mmap_init();
86947 nsproxy_cache_init();
86948 }
86949@@ -1768,7 +1848,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
86950 return 0;
86951
86952 /* don't need lock here; in the worst case we'll do useless copy */
86953- if (fs->users == 1)
86954+ if (atomic_read(&fs->users) == 1)
86955 return 0;
86956
86957 *new_fsp = copy_fs_struct(fs);
86958@@ -1875,7 +1955,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
86959 fs = current->fs;
86960 spin_lock(&fs->lock);
86961 current->fs = new_fs;
86962- if (--fs->users)
86963+ gr_set_chroot_entries(current, &current->fs->root);
86964+ if (atomic_dec_return(&fs->users))
86965 new_fs = NULL;
86966 else
86967 new_fs = fs;
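Most of the fork.c diff is a mechanical extraction: the per-VMA body of dup_mmap()'s loop moves into a new dup_vma() helper that returns the new VMA or NULL, keeping the goto-label unwinding local to the helper (the SEGMEXEC mirror fixup and the vmalloc'd kernel stacks under KSTACKOVERFLOW are the grsecurity-specific additions). The shape of that refactor, in miniature and with invented names:

#include <stdio.h>
#include <stdlib.h>

struct vma { char name[16]; struct vma *next; };

/* helper owns allocation + setup; on failure it unwinds and returns NULL */
static struct vma *dup_vma_demo(const struct vma *src)
{
        struct vma *tmp = malloc(sizeof(*tmp));
        if (!tmp)
                goto fail_nomem;
        *tmp = *src;
        tmp->next = NULL;
        return tmp;

fail_nomem:
        return NULL;                      /* all cleanup stays in the helper */
}

int main(void)
{
        struct vma a = { "text", NULL }, b = { "heap", NULL };
        a.next = &b;

        for (struct vma *v = &a; v; v = v->next) {   /* the dup_mmap loop */
                struct vma *copy = dup_vma_demo(v);
                if (!copy)
                        return 1;                    /* retval = -ENOMEM */
                printf("dup %s\n", copy->name);
                free(copy);
        }
        return 0;
}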
86968diff --git a/kernel/futex.c b/kernel/futex.c
86969index e3087af..8e3b90f 100644
86970--- a/kernel/futex.c
86971+++ b/kernel/futex.c
86972@@ -54,6 +54,7 @@
86973 #include <linux/mount.h>
86974 #include <linux/pagemap.h>
86975 #include <linux/syscalls.h>
86976+#include <linux/ptrace.h>
86977 #include <linux/signal.h>
86978 #include <linux/export.h>
86979 #include <linux/magic.h>
86980@@ -188,7 +189,7 @@ struct futex_pi_state {
86981 atomic_t refcount;
86982
86983 union futex_key key;
86984-};
86985+} __randomize_layout;
86986
86987 /**
86988 * struct futex_q - The hashed futex queue entry, one per waiting task
86989@@ -222,7 +223,7 @@ struct futex_q {
86990 struct rt_mutex_waiter *rt_waiter;
86991 union futex_key *requeue_pi_key;
86992 u32 bitset;
86993-};
86994+} __randomize_layout;
86995
86996 static const struct futex_q futex_q_init = {
86997 /* list gets initialized in queue_me()*/
86998@@ -380,6 +381,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
86999 struct page *page, *page_head;
87000 int err, ro = 0;
87001
87002+#ifdef CONFIG_PAX_SEGMEXEC
87003+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
87004+ return -EFAULT;
87005+#endif
87006+
87007 /*
87008 * The futex address must be "naturally" aligned.
87009 */
87010@@ -579,7 +585,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
87011
87012 static int get_futex_value_locked(u32 *dest, u32 __user *from)
87013 {
87014- int ret;
87015+ unsigned long ret;
87016
87017 pagefault_disable();
87018 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
87019@@ -3019,6 +3025,7 @@ static void __init futex_detect_cmpxchg(void)
87020 {
87021 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
87022 u32 curval;
87023+ mm_segment_t oldfs;
87024
87025 /*
87026 * This will fail and we want it. Some arch implementations do
87027@@ -3030,8 +3037,11 @@ static void __init futex_detect_cmpxchg(void)
87028 * implementation, the non-functional ones will return
87029 * -ENOSYS.
87030 */
87031+ oldfs = get_fs();
87032+ set_fs(USER_DS);
87033 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
87034 futex_cmpxchg_enabled = 1;
87035+ set_fs(oldfs);
87036 #endif
87037 }
87038
87039diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
87040index f9f44fd..29885e4 100644
87041--- a/kernel/futex_compat.c
87042+++ b/kernel/futex_compat.c
87043@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
87044 return 0;
87045 }
87046
87047-static void __user *futex_uaddr(struct robust_list __user *entry,
87048+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
87049 compat_long_t futex_offset)
87050 {
87051 compat_uptr_t base = ptr_to_compat(entry);
87052diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
87053index f45b75b..bfac6d5 100644
87054--- a/kernel/gcov/base.c
87055+++ b/kernel/gcov/base.c
87056@@ -108,11 +108,6 @@ void gcov_enable_events(void)
87057 }
87058
87059 #ifdef CONFIG_MODULES
87060-static inline int within(void *addr, void *start, unsigned long size)
87061-{
87062- return ((addr >= start) && (addr < start + size));
87063-}
87064-
87065 /* Update list and generate events when modules are unloaded. */
87066 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
87067 void *data)
87068@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
87069
87070 /* Remove entries located in module from linked list. */
87071 while ((info = gcov_info_next(info))) {
87072- if (within(info, mod->module_core, mod->core_size)) {
87073+ if (within_module_core_rw((unsigned long)info, mod)) {
87074 gcov_info_unlink(prev, info);
87075 if (gcov_events_enabled)
87076 gcov_event(GCOV_REMOVE, info);
87077diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
87078index 04d0374..e7c3725 100644
87079--- a/kernel/hrtimer.c
87080+++ b/kernel/hrtimer.c
87081@@ -1461,7 +1461,7 @@ void hrtimer_peek_ahead_timers(void)
87082 local_irq_restore(flags);
87083 }
87084
87085-static void run_hrtimer_softirq(struct softirq_action *h)
87086+static __latent_entropy void run_hrtimer_softirq(void)
87087 {
87088 hrtimer_peek_ahead_timers();
87089 }
87090diff --git a/kernel/irq_work.c b/kernel/irq_work.c
87091index 55fcce6..0e4cf34 100644
87092--- a/kernel/irq_work.c
87093+++ b/kernel/irq_work.c
87094@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
87095 return NOTIFY_OK;
87096 }
87097
87098-static struct notifier_block cpu_notify;
87099+static struct notifier_block cpu_notify = {
87100+ .notifier_call = irq_work_cpu_notify,
87101+ .priority = 0,
87102+};
87103
87104 static __init int irq_work_init_cpu_notifier(void)
87105 {
87106- cpu_notify.notifier_call = irq_work_cpu_notify;
87107- cpu_notify.priority = 0;
87108 register_cpu_notifier(&cpu_notify);
87109 return 0;
87110 }
87111diff --git a/kernel/jump_label.c b/kernel/jump_label.c
87112index 9019f15..9a3c42e 100644
87113--- a/kernel/jump_label.c
87114+++ b/kernel/jump_label.c
87115@@ -14,6 +14,7 @@
87116 #include <linux/err.h>
87117 #include <linux/static_key.h>
87118 #include <linux/jump_label_ratelimit.h>
87119+#include <linux/mm.h>
87120
87121 #ifdef HAVE_JUMP_LABEL
87122
87123@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
87124
87125 size = (((unsigned long)stop - (unsigned long)start)
87126 / sizeof(struct jump_entry));
87127+ pax_open_kernel();
87128 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
87129+ pax_close_kernel();
87130 }
87131
87132 static void jump_label_update(struct static_key *key, int enable);
87133@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
87134 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
87135 struct jump_entry *iter;
87136
87137+ pax_open_kernel();
87138 for (iter = iter_start; iter < iter_stop; iter++) {
87139 if (within_module_init(iter->code, mod))
87140 iter->code = 0;
87141 }
87142+ pax_close_kernel();
87143 }
87144
87145 static int
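pax_open_kernel()/pax_close_kernel() bracket the jump-table writes because KERNEXEC keeps those tables read-only; the pair temporarily lifts write protection just long enough for the sort and the module-init invalidation. A userspace analogue with mprotect() — make a page read-only, open it only for the guarded write, then close it again:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        strcpy(p, "jump table v1");
        mprotect(p, pagesz, PROT_READ);              /* steady state: RO */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        strcpy(p, "jump table v2");                  /* the guarded write */
        mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

        puts(p);
        munmap(p, pagesz);
        return 0;
}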
87146diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
87147index 3127ad5..159d880 100644
87148--- a/kernel/kallsyms.c
87149+++ b/kernel/kallsyms.c
87150@@ -11,6 +11,9 @@
87151 * Changed the compression method from stem compression to "table lookup"
87152 * compression (see scripts/kallsyms.c for a more complete description)
87153 */
87154+#ifdef CONFIG_GRKERNSEC_HIDESYM
87155+#define __INCLUDED_BY_HIDESYM 1
87156+#endif
87157 #include <linux/kallsyms.h>
87158 #include <linux/module.h>
87159 #include <linux/init.h>
87160@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
87161
87162 static inline int is_kernel_inittext(unsigned long addr)
87163 {
87164+ if (system_state != SYSTEM_BOOTING)
87165+ return 0;
87166+
87167 if (addr >= (unsigned long)_sinittext
87168 && addr <= (unsigned long)_einittext)
87169 return 1;
87170 return 0;
87171 }
87172
87173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
87174+#ifdef CONFIG_MODULES
87175+static inline int is_module_text(unsigned long addr)
87176+{
87177+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
87178+ return 1;
87179+
87180+ addr = ktla_ktva(addr);
87181+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
87182+}
87183+#else
87184+static inline int is_module_text(unsigned long addr)
87185+{
87186+ return 0;
87187+}
87188+#endif
87189+#endif
87190+
87191 static inline int is_kernel_text(unsigned long addr)
87192 {
87193 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
87194@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
87195
87196 static inline int is_kernel(unsigned long addr)
87197 {
87198+
87199+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
87200+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
87201+ return 1;
87202+
87203+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
87204+#else
87205 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
87206+#endif
87207+
87208 return 1;
87209 return in_gate_area_no_mm(addr);
87210 }
87211
87212 static int is_ksym_addr(unsigned long addr)
87213 {
87214+
87215+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
87216+ if (is_module_text(addr))
87217+ return 0;
87218+#endif
87219+
87220 if (all_var)
87221 return is_kernel(addr);
87222
87223@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
87224
87225 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
87226 {
87227- iter->name[0] = '\0';
87228 iter->nameoff = get_symbol_offset(new_pos);
87229 iter->pos = new_pos;
87230 }
87231@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
87232 {
87233 struct kallsym_iter *iter = m->private;
87234
87235+#ifdef CONFIG_GRKERNSEC_HIDESYM
87236+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
87237+ return 0;
87238+#endif
87239+
87240 /* Some debugging symbols have no name. Ignore them. */
87241 if (!iter->name[0])
87242 return 0;
87243@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
87244 */
87245 type = iter->exported ? toupper(iter->type) :
87246 tolower(iter->type);
87247+
87248 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
87249 type, iter->name, iter->module_name);
87250 } else
87251@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
87252 struct kallsym_iter *iter;
87253 int ret;
87254
87255- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
87256+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
87257 if (!iter)
87258 return -ENOMEM;
87259 reset_iter(iter, 0);
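
Three things interact in the kallsyms hunks: is_kernel_inittext() now answers only while booting, s_show() hides all of /proc/kallsyms from non-root under HIDESYM, and kmalloc becomes kzalloc because the iter->name[0] = '\0' initialization was removed from reset_iter(). The boot-state guard, re-expressed in isolation:

    /* After boot, init text has been freed and its address range may be
     * reused, so treating those addresses as kernel symbols would mislabel
     * arbitrary memory.  The added guard: */
    static inline int is_kernel_inittext(unsigned long addr)
    {
    	if (system_state != SYSTEM_BOOTING)
    		return 0;	/* init text is gone once boot completes */
    	return addr >= (unsigned long)_sinittext
    	    && addr <= (unsigned long)_einittext;
    }
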
87260diff --git a/kernel/kcmp.c b/kernel/kcmp.c
87261index e30ac0f..3528cac 100644
87262--- a/kernel/kcmp.c
87263+++ b/kernel/kcmp.c
87264@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
87265 struct task_struct *task1, *task2;
87266 int ret;
87267
87268+#ifdef CONFIG_GRKERNSEC
87269+ return -ENOSYS;
87270+#endif
87271+
87272 rcu_read_lock();
87273
87274 /*
87275diff --git a/kernel/kexec.c b/kernel/kexec.c
87276index 18ff0b9..40b0eab 100644
87277--- a/kernel/kexec.c
87278+++ b/kernel/kexec.c
87279@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
87280 unsigned long flags)
87281 {
87282 struct compat_kexec_segment in;
87283- struct kexec_segment out, __user *ksegments;
87284+ struct kexec_segment out;
87285+ struct kexec_segment __user *ksegments;
87286 unsigned long i, result;
87287
87288 /* Don't allow clients that don't understand the native
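
The kexec change is purely declaration hygiene: in a combined declarator the __user address-space annotation belongs only to the pointer, which is easy to misread and, presumably, keeps the checker plugins this patch adds happier. Side by side:

    struct kexec_segment out, __user *ksegments;  /* 'out' is a kernel object;  */
                                                  /* only *ksegments is __user  */

    struct kexec_segment out;                     /* split form: each           */
    struct kexec_segment __user *ksegments;       /* annotation is unambiguous  */
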
87289diff --git a/kernel/kmod.c b/kernel/kmod.c
87290index 6b375af..eaff670 100644
87291--- a/kernel/kmod.c
87292+++ b/kernel/kmod.c
87293@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
87294 kfree(info->argv);
87295 }
87296
87297-static int call_modprobe(char *module_name, int wait)
87298+static int call_modprobe(char *module_name, char *module_param, int wait)
87299 {
87300 struct subprocess_info *info;
87301 static char *envp[] = {
87302@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
87303 NULL
87304 };
87305
87306- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
87307+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
87308 if (!argv)
87309 goto out;
87310
87311@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
87312 argv[1] = "-q";
87313 argv[2] = "--";
87314 argv[3] = module_name; /* check free_modprobe_argv() */
87315- argv[4] = NULL;
87316+ argv[4] = module_param;
87317+ argv[5] = NULL;
87318
87319 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
87320 NULL, free_modprobe_argv, NULL);
87321@@ -129,9 +130,8 @@ out:
87322 * If module auto-loading support is disabled then this function
87323 * becomes a no-operation.
87324 */
87325-int __request_module(bool wait, const char *fmt, ...)
87326+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
87327 {
87328- va_list args;
87329 char module_name[MODULE_NAME_LEN];
87330 unsigned int max_modprobes;
87331 int ret;
87332@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
87333 if (!modprobe_path[0])
87334 return 0;
87335
87336- va_start(args, fmt);
87337- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
87338- va_end(args);
87339+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
87340 if (ret >= MODULE_NAME_LEN)
87341 return -ENAMETOOLONG;
87342
87343@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
87344 if (ret)
87345 return ret;
87346
87347+#ifdef CONFIG_GRKERNSEC_MODHARDEN
87348+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
87349+ /* hack to workaround consolekit/udisks stupidity */
87350+ read_lock(&tasklist_lock);
87351+ if (!strcmp(current->comm, "mount") &&
87352+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
87353+ read_unlock(&tasklist_lock);
87354+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
87355+ return -EPERM;
87356+ }
87357+ read_unlock(&tasklist_lock);
87358+ }
87359+#endif
87360+
87361 /* If modprobe needs a service that is in a module, we get a recursive
87362 * loop. Limit the number of running kmod threads to max_threads/2 or
87363 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
87364@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
87365
87366 trace_module_request(module_name, wait, _RET_IP_);
87367
87368- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
87369+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
87370
87371 atomic_dec(&kmod_concurrent);
87372 return ret;
87373 }
87374+
87375+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
87376+{
87377+ va_list args;
87378+ int ret;
87379+
87380+ va_start(args, fmt);
87381+ ret = ____request_module(wait, module_param, fmt, args);
87382+ va_end(args);
87383+
87384+ return ret;
87385+}
87386+
87387+int __request_module(bool wait, const char *fmt, ...)
87388+{
87389+ va_list args;
87390+ int ret;
87391+
87392+#ifdef CONFIG_GRKERNSEC_MODHARDEN
87393+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
87394+ char module_param[MODULE_NAME_LEN];
87395+
87396+ memset(module_param, 0, sizeof(module_param));
87397+
87398+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
87399+
87400+ va_start(args, fmt);
87401+ ret = ____request_module(wait, module_param, fmt, args);
87402+ va_end(args);
87403+
87404+ return ret;
87405+ }
87406+#endif
87407+
87408+ va_start(args, fmt);
87409+ ret = ____request_module(wait, NULL, fmt, args);
87410+ va_end(args);
87411+
87412+ return ret;
87413+}
87414+
87415 EXPORT_SYMBOL(__request_module);
87416 #endif /* CONFIG_MODULES */
87417
87418@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
87419 */
87420 set_user_nice(current, 0);
87421
87422+#ifdef CONFIG_GRKERNSEC
87423+ /* this is race-free as far as userland is concerned because we copied
87424+ out the path to be used prior to this point and are now operating
87425+ on that copy

87426+ */
87427+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
87428+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
87429+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
87430+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
87431+ retval = -EPERM;
87432+ goto fail;
87433+ }
87434+#endif
87435+
87436 retval = -ENOMEM;
87437 new = prepare_kernel_cred(current);
87438 if (!new)
87439@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
87440 commit_creds(new);
87441
87442 retval = do_execve(getname_kernel(sub_info->path),
87443- (const char __user *const __user *)sub_info->argv,
87444- (const char __user *const __user *)sub_info->envp);
87445+ (const char __user *const __force_user *)sub_info->argv,
87446+ (const char __user *const __force_user *)sub_info->envp);
87447 if (!retval)
87448 return 0;
87449
87450@@ -260,6 +327,10 @@ static int call_helper(void *data)
87451
87452 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
87453 {
87454+#ifdef CONFIG_GRKERNSEC
87455+ kfree(info->path);
87456+ info->path = info->origpath;
87457+#endif
87458 if (info->cleanup)
87459 (*info->cleanup)(info);
87460 kfree(info);
87461@@ -303,7 +374,7 @@ static int wait_for_helper(void *data)
87462 *
87463 * Thus the __user pointer cast is valid here.
87464 */
87465- sys_wait4(pid, (int __user *)&ret, 0, NULL);
87466+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
87467
87468 /*
87469 * If ret is 0, either ____call_usermodehelper failed and the
87470@@ -542,7 +613,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
87471 goto out;
87472
87473 INIT_WORK(&sub_info->work, __call_usermodehelper);
87474+#ifdef CONFIG_GRKERNSEC
87475+ sub_info->origpath = path;
87476+ sub_info->path = kstrdup(path, gfp_mask);
87477+#else
87478 sub_info->path = path;
87479+#endif
87480 sub_info->argv = argv;
87481 sub_info->envp = envp;
87482
87483@@ -650,7 +726,7 @@ EXPORT_SYMBOL(call_usermodehelper);
87484 static int proc_cap_handler(struct ctl_table *table, int write,
87485 void __user *buffer, size_t *lenp, loff_t *ppos)
87486 {
87487- struct ctl_table t;
87488+ ctl_table_no_const t;
87489 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
87490 kernel_cap_t new_cap;
87491 int err, i;
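
Two policies are layered in the kmod hunks. MODHARDEN: a non-root request_module() call is rewritten to carry a marker argument, so the eventual load attempt can be attributed and denied later in kernel/module.c (see the grsec_modharden_normal handling there). Usermode helpers: ____call_usermodehelper() refuses any helper binary outside the listed system paths, checked against a kernel-side copy of the path so userland cannot race the check. The marker, reconstructed from the argv[] assembly above (uid 1000 is an illustrative value):

    /* What a non-root auto-load request ends up executing:
     *
     *   /sbin/modprobe -q -- <module_name> grsec_modharden_normal1000_
     */
    char module_param[MODULE_NAME_LEN];

    memset(module_param, 0, sizeof(module_param));
    snprintf(module_param, sizeof(module_param) - 1,
             "grsec_modharden_normal%u_", 1000u);
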
87492diff --git a/kernel/kprobes.c b/kernel/kprobes.c
87493index ceeadfc..11c18b6 100644
87494--- a/kernel/kprobes.c
87495+++ b/kernel/kprobes.c
87496@@ -31,6 +31,9 @@
87497 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
87498 * <prasanna@in.ibm.com> added function-return probes.
87499 */
87500+#ifdef CONFIG_GRKERNSEC_HIDESYM
87501+#define __INCLUDED_BY_HIDESYM 1
87502+#endif
87503 #include <linux/kprobes.h>
87504 #include <linux/hash.h>
87505 #include <linux/init.h>
87506@@ -135,12 +138,12 @@ enum kprobe_slot_state {
87507
87508 static void *alloc_insn_page(void)
87509 {
87510- return module_alloc(PAGE_SIZE);
87511+ return module_alloc_exec(PAGE_SIZE);
87512 }
87513
87514 static void free_insn_page(void *page)
87515 {
87516- module_free(NULL, page);
87517+ module_free_exec(NULL, page);
87518 }
87519
87520 struct kprobe_insn_cache kprobe_insn_slots = {
87521@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
87522 kprobe_type = "k";
87523
87524 if (sym)
87525- seq_printf(pi, "%p %s %s+0x%x %s ",
87526+ seq_printf(pi, "%pK %s %s+0x%x %s ",
87527 p->addr, kprobe_type, sym, offset,
87528 (modname ? modname : " "));
87529 else
87530- seq_printf(pi, "%p %s %p ",
87531+ seq_printf(pi, "%pK %s %pK ",
87532 p->addr, kprobe_type, p->addr);
87533
87534 if (!pp)
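
The kprobes hunks make two separate moves: instruction slots are carved out of the executable module region (module_alloc_exec/module_free_exec, introduced by this patch) rather than the generic RW module allocator, and the debugfs listing switches %p to %pK so addresses are censored per the kptr_restrict sysctl. Roughly:

    /* %p prints the raw pointer; %pK consults kptr_restrict and prints an
     * all-zero value to readers without the required capability, so with
     * kptr_restrict=1 an unprivileged read of kprobes/list sees lines like
     *
     *   0000000000000000 k symbol+0x0 ...
     */
    seq_printf(pi, "%pK %s %s+0x%x %s ",
               p->addr, kprobe_type, sym, offset, modname ? modname : " ");
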
87535diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
87536index d945a94..0b7f45f 100644
87537--- a/kernel/ksysfs.c
87538+++ b/kernel/ksysfs.c
87539@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
87540 {
87541 if (count+1 > UEVENT_HELPER_PATH_LEN)
87542 return -ENOENT;
87543+ if (!capable(CAP_SYS_ADMIN))
87544+ return -EPERM;
87545 memcpy(uevent_helper, buf, count);
87546 uevent_helper[count] = '\0';
87547 if (count && uevent_helper[count-1] == '\n')
87548@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
87549 return count;
87550 }
87551
87552-static struct bin_attribute notes_attr = {
87553+static bin_attribute_no_const notes_attr __read_only = {
87554 .attr = {
87555 .name = "notes",
87556 .mode = S_IRUGO,
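
bin_attribute_no_const plus __read_only is the constify-plugin idiom used throughout this patch: attribute and ops structures are made const by default, a *_no_const typedef opts out the rare object that must be initialized at runtime, and __read_only then places it in memory that is write-protected once boot finishes. (The uevent_helper change above is independent: writing the helper path now requires CAP_SYS_ADMIN, not merely write access to the sysfs file.) Assumed shape of the idiom; the real typedefs live in this patch's include/ changes:

    /* Sketch: same layout, minus the const the plugin would otherwise imply. */
    typedef struct bin_attribute bin_attribute_no_const;

    static bin_attribute_no_const notes_attr __read_only = {
    	.attr = { .name = "notes", .mode = S_IRUGO },
    	/* .size is filled in at runtime -- the reason plain const won't do */
    };
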
87557diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
87558index eb8a547..321d8e1 100644
87559--- a/kernel/locking/lockdep.c
87560+++ b/kernel/locking/lockdep.c
87561@@ -597,6 +597,10 @@ static int static_obj(void *obj)
87562 end = (unsigned long) &_end,
87563 addr = (unsigned long) obj;
87564
87565+#ifdef CONFIG_PAX_KERNEXEC
87566+ start = ktla_ktva(start);
87567+#endif
87568+
87569 /*
87570 * static variable?
87571 */
87572@@ -738,6 +742,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
87573 if (!static_obj(lock->key)) {
87574 debug_locks_off();
87575 printk("INFO: trying to register non-static key.\n");
87576+ printk("lock:%pS key:%pS.\n", lock, lock->key);
87577 printk("the code is fine but needs lockdep annotation.\n");
87578 printk("turning off the locking correctness validator.\n");
87579 dump_stack();
87580@@ -3082,7 +3087,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
87581 if (!class)
87582 return 0;
87583 }
87584- atomic_inc((atomic_t *)&class->ops);
87585+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
87586 if (very_verbose(class)) {
87587 printk("\nacquire class [%p] %s", class->key, class->name);
87588 if (class->name_version > 1)
87589diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
87590index ef43ac4..2720dfa 100644
87591--- a/kernel/locking/lockdep_proc.c
87592+++ b/kernel/locking/lockdep_proc.c
87593@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
87594 return 0;
87595 }
87596
87597- seq_printf(m, "%p", class->key);
87598+ seq_printf(m, "%pK", class->key);
87599 #ifdef CONFIG_DEBUG_LOCKDEP
87600 seq_printf(m, " OPS:%8ld", class->ops);
87601 #endif
87602@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
87603
87604 list_for_each_entry(entry, &class->locks_after, entry) {
87605 if (entry->distance == 1) {
87606- seq_printf(m, " -> [%p] ", entry->class->key);
87607+ seq_printf(m, " -> [%pK] ", entry->class->key);
87608 print_name(m, entry->class);
87609 seq_puts(m, "\n");
87610 }
87611@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
87612 if (!class->key)
87613 continue;
87614
87615- seq_printf(m, "[%p] ", class->key);
87616+ seq_printf(m, "[%pK] ", class->key);
87617 print_name(m, class);
87618 seq_puts(m, "\n");
87619 }
87620@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
87621 if (!i)
87622 seq_line(m, '-', 40-namelen, namelen);
87623
87624- snprintf(ip, sizeof(ip), "[<%p>]",
87625+ snprintf(ip, sizeof(ip), "[<%pK>]",
87626 (void *)class->contention_point[i]);
87627 seq_printf(m, "%40s %14lu %29s %pS\n",
87628 name, stats->contention_point[i],
87629@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
87630 if (!i)
87631 seq_line(m, '-', 40-namelen, namelen);
87632
87633- snprintf(ip, sizeof(ip), "[<%p>]",
87634+ snprintf(ip, sizeof(ip), "[<%pK>]",
87635 (void *)class->contending_point[i]);
87636 seq_printf(m, "%40s %14lu %29s %pS\n",
87637 name, stats->contending_point[i],
87638diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
87639index faf6f5b..dc9070a 100644
87640--- a/kernel/locking/mutex-debug.c
87641+++ b/kernel/locking/mutex-debug.c
87642@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
87643 }
87644
87645 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
87646- struct thread_info *ti)
87647+ struct task_struct *task)
87648 {
87649 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
87650
87651 /* Mark the current thread as blocked on the lock: */
87652- ti->task->blocked_on = waiter;
87653+ task->blocked_on = waiter;
87654 }
87655
87656 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
87657- struct thread_info *ti)
87658+ struct task_struct *task)
87659 {
87660 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
87661- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
87662- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
87663- ti->task->blocked_on = NULL;
87664+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
87665+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
87666+ task->blocked_on = NULL;
87667
87668 list_del_init(&waiter->list);
87669 waiter->task = NULL;
87670diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
87671index 0799fd3..d06ae3b 100644
87672--- a/kernel/locking/mutex-debug.h
87673+++ b/kernel/locking/mutex-debug.h
87674@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
87675 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
87676 extern void debug_mutex_add_waiter(struct mutex *lock,
87677 struct mutex_waiter *waiter,
87678- struct thread_info *ti);
87679+ struct task_struct *task);
87680 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
87681- struct thread_info *ti);
87682+ struct task_struct *task);
87683 extern void debug_mutex_unlock(struct mutex *lock);
87684 extern void debug_mutex_init(struct mutex *lock, const char *name,
87685 struct lock_class_key *key);
87686diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
87687index 4dd6e4c..df52693 100644
87688--- a/kernel/locking/mutex.c
87689+++ b/kernel/locking/mutex.c
87690@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
87691 node->locked = 1;
87692 return;
87693 }
87694- ACCESS_ONCE(prev->next) = node;
87695+ ACCESS_ONCE_RW(prev->next) = node;
87696 smp_wmb();
87697 /* Wait until the lock holder passes the lock down */
87698 while (!ACCESS_ONCE(node->locked))
87699@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
87700 while (!(next = ACCESS_ONCE(node->next)))
87701 arch_mutex_cpu_relax();
87702 }
87703- ACCESS_ONCE(next->locked) = 1;
87704+ ACCESS_ONCE_RW(next->locked) = 1;
87705 smp_wmb();
87706 }
87707
87708@@ -520,7 +520,7 @@ slowpath:
87709 goto skip_wait;
87710
87711 debug_mutex_lock_common(lock, &waiter);
87712- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
87713+ debug_mutex_add_waiter(lock, &waiter, task);
87714
87715 /* add waiting tasks to the end of the waitqueue (FIFO): */
87716 list_add_tail(&waiter.list, &lock->wait_list);
87717@@ -564,7 +564,7 @@ slowpath:
87718 schedule_preempt_disabled();
87719 spin_lock_mutex(&lock->wait_lock, flags);
87720 }
87721- mutex_remove_waiter(lock, &waiter, current_thread_info());
87722+ mutex_remove_waiter(lock, &waiter, task);
87723 /* set it to 0 if there are no waiters left: */
87724 if (likely(list_empty(&lock->wait_list)))
87725 atomic_set(&lock->count, 0);
87726@@ -601,7 +601,7 @@ skip_wait:
87727 return 0;
87728
87729 err:
87730- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
87731+ mutex_remove_waiter(lock, &waiter, task);
87732 spin_unlock_mutex(&lock->wait_lock, flags);
87733 debug_mutex_free_waiter(&waiter);
87734 mutex_release(&lock->dep_map, 1, ip);
87735diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
87736index 1d96dd0..994ff19 100644
87737--- a/kernel/locking/rtmutex-tester.c
87738+++ b/kernel/locking/rtmutex-tester.c
87739@@ -22,7 +22,7 @@
87740 #define MAX_RT_TEST_MUTEXES 8
87741
87742 static spinlock_t rttest_lock;
87743-static atomic_t rttest_event;
87744+static atomic_unchecked_t rttest_event;
87745
87746 struct test_thread_data {
87747 int opcode;
87748@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
87749
87750 case RTTEST_LOCKCONT:
87751 td->mutexes[td->opdata] = 1;
87752- td->event = atomic_add_return(1, &rttest_event);
87753+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87754 return 0;
87755
87756 case RTTEST_RESET:
87757@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
87758 return 0;
87759
87760 case RTTEST_RESETEVENT:
87761- atomic_set(&rttest_event, 0);
87762+ atomic_set_unchecked(&rttest_event, 0);
87763 return 0;
87764
87765 default:
87766@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
87767 return ret;
87768
87769 td->mutexes[id] = 1;
87770- td->event = atomic_add_return(1, &rttest_event);
87771+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87772 rt_mutex_lock(&mutexes[id]);
87773- td->event = atomic_add_return(1, &rttest_event);
87774+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87775 td->mutexes[id] = 4;
87776 return 0;
87777
87778@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
87779 return ret;
87780
87781 td->mutexes[id] = 1;
87782- td->event = atomic_add_return(1, &rttest_event);
87783+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87784 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
87785- td->event = atomic_add_return(1, &rttest_event);
87786+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87787 td->mutexes[id] = ret ? 0 : 4;
87788 return ret ? -EINTR : 0;
87789
87790@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
87791 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
87792 return ret;
87793
87794- td->event = atomic_add_return(1, &rttest_event);
87795+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87796 rt_mutex_unlock(&mutexes[id]);
87797- td->event = atomic_add_return(1, &rttest_event);
87798+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87799 td->mutexes[id] = 0;
87800 return 0;
87801
87802@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
87803 break;
87804
87805 td->mutexes[dat] = 2;
87806- td->event = atomic_add_return(1, &rttest_event);
87807+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87808 break;
87809
87810 default:
87811@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
87812 return;
87813
87814 td->mutexes[dat] = 3;
87815- td->event = atomic_add_return(1, &rttest_event);
87816+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87817 break;
87818
87819 case RTTEST_LOCKNOWAIT:
87820@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
87821 return;
87822
87823 td->mutexes[dat] = 1;
87824- td->event = atomic_add_return(1, &rttest_event);
87825+ td->event = atomic_add_return_unchecked(1, &rttest_event);
87826 return;
87827
87828 default:
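
Every atomic_t to atomic_unchecked_t swap in this file (and in lockdep and padata elsewhere in this section) serves PAX_REFCOUNT: ordinary atomic ops gain an overflow trap so reference-count wraps become a controlled event instead of a use-after-free, while counters where wrapping is harmless, such as this test driver's event sequence, are explicitly opted out. Assumed shape of the split; the real definitions are in this patch's per-arch atomic headers:

    /* Sketch only: an unchecked counter keeps plain wrapping semantics. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
    	return __sync_add_and_fetch(&v->counter, i);  /* may wrap, by design */
    }
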
87829diff --git a/kernel/module.c b/kernel/module.c
87830index 6716a1f..9ddc1e1 100644
87831--- a/kernel/module.c
87832+++ b/kernel/module.c
87833@@ -61,6 +61,7 @@
87834 #include <linux/pfn.h>
87835 #include <linux/bsearch.h>
87836 #include <linux/fips.h>
87837+#include <linux/grsecurity.h>
87838 #include <uapi/linux/module.h>
87839 #include "module-internal.h"
87840
87841@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
87842
87843 /* Bounds of module allocation, for speeding __module_address.
87844 * Protected by module_mutex. */
87845-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
87846+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
87847+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
87848
87849 int register_module_notifier(struct notifier_block * nb)
87850 {
87851@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
87852 return true;
87853
87854 list_for_each_entry_rcu(mod, &modules, list) {
87855- struct symsearch arr[] = {
87856+ struct symsearch modarr[] = {
87857 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
87858 NOT_GPL_ONLY, false },
87859 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
87860@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
87861 if (mod->state == MODULE_STATE_UNFORMED)
87862 continue;
87863
87864- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
87865+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
87866 return true;
87867 }
87868 return false;
87869@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
87870 if (!pcpusec->sh_size)
87871 return 0;
87872
87873- if (align > PAGE_SIZE) {
87874+ if (align-1 >= PAGE_SIZE) {
87875 pr_warn("%s: per-cpu alignment %li > %li\n",
87876 mod->name, align, PAGE_SIZE);
87877 align = PAGE_SIZE;
87878@@ -1059,7 +1061,7 @@ struct module_attribute module_uevent =
87879 static ssize_t show_coresize(struct module_attribute *mattr,
87880 struct module_kobject *mk, char *buffer)
87881 {
87882- return sprintf(buffer, "%u\n", mk->mod->core_size);
87883+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
87884 }
87885
87886 static struct module_attribute modinfo_coresize =
87887@@ -1068,7 +1070,7 @@ static struct module_attribute modinfo_coresize =
87888 static ssize_t show_initsize(struct module_attribute *mattr,
87889 struct module_kobject *mk, char *buffer)
87890 {
87891- return sprintf(buffer, "%u\n", mk->mod->init_size);
87892+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
87893 }
87894
87895 static struct module_attribute modinfo_initsize =
87896@@ -1160,12 +1162,29 @@ static int check_version(Elf_Shdr *sechdrs,
87897 goto bad_version;
87898 }
87899
87900+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
87901+ /*
87902+ * avoid potentially printing gibberish on attempted load
87903+ * of a module randomized with a different seed
87904+ */
87905+ pr_warn("no symbol version for %s\n", symname);
87906+#else
87907 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
87908+#endif
87909 return 0;
87910
87911 bad_version:
87912+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
87913+ /*
87914+ * avoid potentially printing gibberish on attempted load
87915+ * of a module randomized with a different seed
87916+ */
87917+ printk("attempted module disagrees about version of symbol %s\n",
87918+ symname);
87919+#else
87920 printk("%s: disagrees about version of symbol %s\n",
87921 mod->name, symname);
87922+#endif
87923 return 0;
87924 }
87925
87926@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
87927 */
87928 #ifdef CONFIG_SYSFS
87929
87930-#ifdef CONFIG_KALLSYMS
87931+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
87932 static inline bool sect_empty(const Elf_Shdr *sect)
87933 {
87934 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
87935@@ -1421,7 +1440,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
87936 {
87937 unsigned int notes, loaded, i;
87938 struct module_notes_attrs *notes_attrs;
87939- struct bin_attribute *nattr;
87940+ bin_attribute_no_const *nattr;
87941
87942 /* failed to create section attributes, so can't create notes */
87943 if (!mod->sect_attrs)
87944@@ -1533,7 +1552,7 @@ static void del_usage_links(struct module *mod)
87945 static int module_add_modinfo_attrs(struct module *mod)
87946 {
87947 struct module_attribute *attr;
87948- struct module_attribute *temp_attr;
87949+ module_attribute_no_const *temp_attr;
87950 int error = 0;
87951 int i;
87952
87953@@ -1754,21 +1773,21 @@ static void set_section_ro_nx(void *base,
87954
87955 static void unset_module_core_ro_nx(struct module *mod)
87956 {
87957- set_page_attributes(mod->module_core + mod->core_text_size,
87958- mod->module_core + mod->core_size,
87959+ set_page_attributes(mod->module_core_rw,
87960+ mod->module_core_rw + mod->core_size_rw,
87961 set_memory_x);
87962- set_page_attributes(mod->module_core,
87963- mod->module_core + mod->core_ro_size,
87964+ set_page_attributes(mod->module_core_rx,
87965+ mod->module_core_rx + mod->core_size_rx,
87966 set_memory_rw);
87967 }
87968
87969 static void unset_module_init_ro_nx(struct module *mod)
87970 {
87971- set_page_attributes(mod->module_init + mod->init_text_size,
87972- mod->module_init + mod->init_size,
87973+ set_page_attributes(mod->module_init_rw,
87974+ mod->module_init_rw + mod->init_size_rw,
87975 set_memory_x);
87976- set_page_attributes(mod->module_init,
87977- mod->module_init + mod->init_ro_size,
87978+ set_page_attributes(mod->module_init_rx,
87979+ mod->module_init_rx + mod->init_size_rx,
87980 set_memory_rw);
87981 }
87982
87983@@ -1781,14 +1800,14 @@ void set_all_modules_text_rw(void)
87984 list_for_each_entry_rcu(mod, &modules, list) {
87985 if (mod->state == MODULE_STATE_UNFORMED)
87986 continue;
87987- if ((mod->module_core) && (mod->core_text_size)) {
87988- set_page_attributes(mod->module_core,
87989- mod->module_core + mod->core_text_size,
87990+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
87991+ set_page_attributes(mod->module_core_rx,
87992+ mod->module_core_rx + mod->core_size_rx,
87993 set_memory_rw);
87994 }
87995- if ((mod->module_init) && (mod->init_text_size)) {
87996- set_page_attributes(mod->module_init,
87997- mod->module_init + mod->init_text_size,
87998+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
87999+ set_page_attributes(mod->module_init_rx,
88000+ mod->module_init_rx + mod->init_size_rx,
88001 set_memory_rw);
88002 }
88003 }
88004@@ -1804,14 +1823,14 @@ void set_all_modules_text_ro(void)
88005 list_for_each_entry_rcu(mod, &modules, list) {
88006 if (mod->state == MODULE_STATE_UNFORMED)
88007 continue;
88008- if ((mod->module_core) && (mod->core_text_size)) {
88009- set_page_attributes(mod->module_core,
88010- mod->module_core + mod->core_text_size,
88011+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
88012+ set_page_attributes(mod->module_core_rx,
88013+ mod->module_core_rx + mod->core_size_rx,
88014 set_memory_ro);
88015 }
88016- if ((mod->module_init) && (mod->init_text_size)) {
88017- set_page_attributes(mod->module_init,
88018- mod->module_init + mod->init_text_size,
88019+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
88020+ set_page_attributes(mod->module_init_rx,
88021+ mod->module_init_rx + mod->init_size_rx,
88022 set_memory_ro);
88023 }
88024 }
88025@@ -1862,16 +1881,19 @@ static void free_module(struct module *mod)
88026
88027 /* This may be NULL, but that's OK */
88028 unset_module_init_ro_nx(mod);
88029- module_free(mod, mod->module_init);
88030+ module_free(mod, mod->module_init_rw);
88031+ module_free_exec(mod, mod->module_init_rx);
88032 kfree(mod->args);
88033 percpu_modfree(mod);
88034
88035 /* Free lock-classes: */
88036- lockdep_free_key_range(mod->module_core, mod->core_size);
88037+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
88038+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
88039
88040 /* Finally, free the core (containing the module structure) */
88041 unset_module_core_ro_nx(mod);
88042- module_free(mod, mod->module_core);
88043+ module_free_exec(mod, mod->module_core_rx);
88044+ module_free(mod, mod->module_core_rw);
88045
88046 #ifdef CONFIG_MPU
88047 update_protections(current->mm);
88048@@ -1940,9 +1962,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
88049 int ret = 0;
88050 const struct kernel_symbol *ksym;
88051
88052+#ifdef CONFIG_GRKERNSEC_MODHARDEN
88053+ int is_fs_load = 0;
88054+ int register_filesystem_found = 0;
88055+ char *p;
88056+
88057+ p = strstr(mod->args, "grsec_modharden_fs");
88058+ if (p) {
88059+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
88060+ /* copy \0 as well */
88061+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
88062+ is_fs_load = 1;
88063+ }
88064+#endif
88065+
88066 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
88067 const char *name = info->strtab + sym[i].st_name;
88068
88069+#ifdef CONFIG_GRKERNSEC_MODHARDEN
88070+ /* it's a real shame this will never get ripped and copied
88071+ upstream! ;(
88072+ */
88073+ if (is_fs_load && !strcmp(name, "register_filesystem"))
88074+ register_filesystem_found = 1;
88075+#endif
88076+
88077 switch (sym[i].st_shndx) {
88078 case SHN_COMMON:
88079 /* We compiled with -fno-common. These are not
88080@@ -1963,7 +2007,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
88081 ksym = resolve_symbol_wait(mod, info, name);
88082 /* Ok if resolved. */
88083 if (ksym && !IS_ERR(ksym)) {
88084+ pax_open_kernel();
88085 sym[i].st_value = ksym->value;
88086+ pax_close_kernel();
88087 break;
88088 }
88089
88090@@ -1982,11 +2028,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
88091 secbase = (unsigned long)mod_percpu(mod);
88092 else
88093 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
88094+ pax_open_kernel();
88095 sym[i].st_value += secbase;
88096+ pax_close_kernel();
88097 break;
88098 }
88099 }
88100
88101+#ifdef CONFIG_GRKERNSEC_MODHARDEN
88102+ if (is_fs_load && !register_filesystem_found) {
88103+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
88104+ ret = -EPERM;
88105+ }
88106+#endif
88107+
88108 return ret;
88109 }
88110
88111@@ -2070,22 +2125,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
88112 || s->sh_entsize != ~0UL
88113 || strstarts(sname, ".init"))
88114 continue;
88115- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
88116+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
88117+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
88118+ else
88119+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
88120 pr_debug("\t%s\n", sname);
88121 }
88122- switch (m) {
88123- case 0: /* executable */
88124- mod->core_size = debug_align(mod->core_size);
88125- mod->core_text_size = mod->core_size;
88126- break;
88127- case 1: /* RO: text and ro-data */
88128- mod->core_size = debug_align(mod->core_size);
88129- mod->core_ro_size = mod->core_size;
88130- break;
88131- case 3: /* whole core */
88132- mod->core_size = debug_align(mod->core_size);
88133- break;
88134- }
88135 }
88136
88137 pr_debug("Init section allocation order:\n");
88138@@ -2099,23 +2144,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
88139 || s->sh_entsize != ~0UL
88140 || !strstarts(sname, ".init"))
88141 continue;
88142- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
88143- | INIT_OFFSET_MASK);
88144+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
88145+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
88146+ else
88147+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
88148+ s->sh_entsize |= INIT_OFFSET_MASK;
88149 pr_debug("\t%s\n", sname);
88150 }
88151- switch (m) {
88152- case 0: /* executable */
88153- mod->init_size = debug_align(mod->init_size);
88154- mod->init_text_size = mod->init_size;
88155- break;
88156- case 1: /* RO: text and ro-data */
88157- mod->init_size = debug_align(mod->init_size);
88158- mod->init_ro_size = mod->init_size;
88159- break;
88160- case 3: /* whole init */
88161- mod->init_size = debug_align(mod->init_size);
88162- break;
88163- }
88164 }
88165 }
88166
88167@@ -2288,7 +2323,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
88168
88169 /* Put symbol section at end of init part of module. */
88170 symsect->sh_flags |= SHF_ALLOC;
88171- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
88172+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
88173 info->index.sym) | INIT_OFFSET_MASK;
88174 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
88175
88176@@ -2305,13 +2340,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
88177 }
88178
88179 /* Append room for core symbols at end of core part. */
88180- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
88181- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
88182- mod->core_size += strtab_size;
88183+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
88184+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
88185+ mod->core_size_rx += strtab_size;
88186
88187 /* Put string table section at end of init part of module. */
88188 strsect->sh_flags |= SHF_ALLOC;
88189- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
88190+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
88191 info->index.str) | INIT_OFFSET_MASK;
88192 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
88193 }
88194@@ -2329,12 +2364,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
88195 /* Make sure we get permanent strtab: don't use info->strtab. */
88196 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
88197
88198+ pax_open_kernel();
88199+
88200 /* Set types up while we still have access to sections. */
88201 for (i = 0; i < mod->num_symtab; i++)
88202 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
88203
88204- mod->core_symtab = dst = mod->module_core + info->symoffs;
88205- mod->core_strtab = s = mod->module_core + info->stroffs;
88206+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
88207+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
88208 src = mod->symtab;
88209 for (ndst = i = 0; i < mod->num_symtab; i++) {
88210 if (i == 0 ||
88211@@ -2346,6 +2383,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
88212 }
88213 }
88214 mod->core_num_syms = ndst;
88215+
88216+ pax_close_kernel();
88217 }
88218 #else
88219 static inline void layout_symtab(struct module *mod, struct load_info *info)
88220@@ -2379,17 +2418,33 @@ void * __weak module_alloc(unsigned long size)
88221 return vmalloc_exec(size);
88222 }
88223
88224-static void *module_alloc_update_bounds(unsigned long size)
88225+static void *module_alloc_update_bounds_rw(unsigned long size)
88226 {
88227 void *ret = module_alloc(size);
88228
88229 if (ret) {
88230 mutex_lock(&module_mutex);
88231 /* Update module bounds. */
88232- if ((unsigned long)ret < module_addr_min)
88233- module_addr_min = (unsigned long)ret;
88234- if ((unsigned long)ret + size > module_addr_max)
88235- module_addr_max = (unsigned long)ret + size;
88236+ if ((unsigned long)ret < module_addr_min_rw)
88237+ module_addr_min_rw = (unsigned long)ret;
88238+ if ((unsigned long)ret + size > module_addr_max_rw)
88239+ module_addr_max_rw = (unsigned long)ret + size;
88240+ mutex_unlock(&module_mutex);
88241+ }
88242+ return ret;
88243+}
88244+
88245+static void *module_alloc_update_bounds_rx(unsigned long size)
88246+{
88247+ void *ret = module_alloc_exec(size);
88248+
88249+ if (ret) {
88250+ mutex_lock(&module_mutex);
88251+ /* Update module bounds. */
88252+ if ((unsigned long)ret < module_addr_min_rx)
88253+ module_addr_min_rx = (unsigned long)ret;
88254+ if ((unsigned long)ret + size > module_addr_max_rx)
88255+ module_addr_max_rx = (unsigned long)ret + size;
88256 mutex_unlock(&module_mutex);
88257 }
88258 return ret;
88259@@ -2646,7 +2701,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
88260 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
88261
88262 if (info->index.sym == 0) {
88263+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
88264+ /*
88265+ * avoid potentially printing gibberish on attempted load
88266+ * of a module randomized with a different seed
88267+ */
88268+ pr_warn("module has no symbols (stripped?)\n");
88269+#else
88270 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
88271+#endif
88272 return ERR_PTR(-ENOEXEC);
88273 }
88274
88275@@ -2662,8 +2725,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
88276 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
88277 {
88278 const char *modmagic = get_modinfo(info, "vermagic");
88279+ const char *license = get_modinfo(info, "license");
88280 int err;
88281
88282+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
88283+ if (!license || !license_is_gpl_compatible(license))
88284+ return -ENOEXEC;
88285+#endif
88286+
88287 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
88288 modmagic = NULL;
88289
88290@@ -2688,7 +2757,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
88291 }
88292
88293 /* Set up license info based on the info section */
88294- set_license(mod, get_modinfo(info, "license"));
88295+ set_license(mod, license);
88296
88297 return 0;
88298 }
88299@@ -2782,7 +2851,7 @@ static int move_module(struct module *mod, struct load_info *info)
88300 void *ptr;
88301
88302 /* Do the allocs. */
88303- ptr = module_alloc_update_bounds(mod->core_size);
88304+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
88305 /*
88306 * The pointer to this block is stored in the module structure
88307 * which is inside the block. Just mark it as not being a
88308@@ -2792,11 +2861,11 @@ static int move_module(struct module *mod, struct load_info *info)
88309 if (!ptr)
88310 return -ENOMEM;
88311
88312- memset(ptr, 0, mod->core_size);
88313- mod->module_core = ptr;
88314+ memset(ptr, 0, mod->core_size_rw);
88315+ mod->module_core_rw = ptr;
88316
88317- if (mod->init_size) {
88318- ptr = module_alloc_update_bounds(mod->init_size);
88319+ if (mod->init_size_rw) {
88320+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
88321 /*
88322 * The pointer to this block is stored in the module structure
88323 * which is inside the block. This block doesn't need to be
88324@@ -2805,13 +2874,45 @@ static int move_module(struct module *mod, struct load_info *info)
88325 */
88326 kmemleak_ignore(ptr);
88327 if (!ptr) {
88328- module_free(mod, mod->module_core);
88329+ module_free(mod, mod->module_core_rw);
88330 return -ENOMEM;
88331 }
88332- memset(ptr, 0, mod->init_size);
88333- mod->module_init = ptr;
88334+ memset(ptr, 0, mod->init_size_rw);
88335+ mod->module_init_rw = ptr;
88336 } else
88337- mod->module_init = NULL;
88338+ mod->module_init_rw = NULL;
88339+
88340+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
88341+ kmemleak_not_leak(ptr);
88342+ if (!ptr) {
88343+ if (mod->module_init_rw)
88344+ module_free(mod, mod->module_init_rw);
88345+ module_free(mod, mod->module_core_rw);
88346+ return -ENOMEM;
88347+ }
88348+
88349+ pax_open_kernel();
88350+ memset(ptr, 0, mod->core_size_rx);
88351+ pax_close_kernel();
88352+ mod->module_core_rx = ptr;
88353+
88354+ if (mod->init_size_rx) {
88355+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
88356+ kmemleak_ignore(ptr);
88357+ if (!ptr && mod->init_size_rx) {
88358+ module_free_exec(mod, mod->module_core_rx);
88359+ if (mod->module_init_rw)
88360+ module_free(mod, mod->module_init_rw);
88361+ module_free(mod, mod->module_core_rw);
88362+ return -ENOMEM;
88363+ }
88364+
88365+ pax_open_kernel();
88366+ memset(ptr, 0, mod->init_size_rx);
88367+ pax_close_kernel();
88368+ mod->module_init_rx = ptr;
88369+ } else
88370+ mod->module_init_rx = NULL;
88371
88372 /* Transfer each section which specifies SHF_ALLOC */
88373 pr_debug("final section addresses:\n");
88374@@ -2822,16 +2923,45 @@ static int move_module(struct module *mod, struct load_info *info)
88375 if (!(shdr->sh_flags & SHF_ALLOC))
88376 continue;
88377
88378- if (shdr->sh_entsize & INIT_OFFSET_MASK)
88379- dest = mod->module_init
88380- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
88381- else
88382- dest = mod->module_core + shdr->sh_entsize;
88383+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
88384+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
88385+ dest = mod->module_init_rw
88386+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
88387+ else
88388+ dest = mod->module_init_rx
88389+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
88390+ } else {
88391+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
88392+ dest = mod->module_core_rw + shdr->sh_entsize;
88393+ else
88394+ dest = mod->module_core_rx + shdr->sh_entsize;
88395+ }
88396+
88397+ if (shdr->sh_type != SHT_NOBITS) {
88398+
88399+#ifdef CONFIG_PAX_KERNEXEC
88400+#ifdef CONFIG_X86_64
88401+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
88402+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
88403+#endif
88404+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
88405+ pax_open_kernel();
88406+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
88407+ pax_close_kernel();
88408+ } else
88409+#endif
88410
88411- if (shdr->sh_type != SHT_NOBITS)
88412 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
88413+ }
88414 /* Update sh_addr to point to copy in image. */
88415- shdr->sh_addr = (unsigned long)dest;
88416+
88417+#ifdef CONFIG_PAX_KERNEXEC
88418+ if (shdr->sh_flags & SHF_EXECINSTR)
88419+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
88420+ else
88421+#endif
88422+
88423+ shdr->sh_addr = (unsigned long)dest;
88424 pr_debug("\t0x%lx %s\n",
88425 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
88426 }
88427@@ -2888,12 +3018,12 @@ static void flush_module_icache(const struct module *mod)
88428 * Do it before processing of module parameters, so the module
88429 * can provide parameter accessor functions of its own.
88430 */
88431- if (mod->module_init)
88432- flush_icache_range((unsigned long)mod->module_init,
88433- (unsigned long)mod->module_init
88434- + mod->init_size);
88435- flush_icache_range((unsigned long)mod->module_core,
88436- (unsigned long)mod->module_core + mod->core_size);
88437+ if (mod->module_init_rx)
88438+ flush_icache_range((unsigned long)mod->module_init_rx,
88439+ (unsigned long)mod->module_init_rx
88440+ + mod->init_size_rx);
88441+ flush_icache_range((unsigned long)mod->module_core_rx,
88442+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
88443
88444 set_fs(old_fs);
88445 }
88446@@ -2950,8 +3080,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
88447 static void module_deallocate(struct module *mod, struct load_info *info)
88448 {
88449 percpu_modfree(mod);
88450- module_free(mod, mod->module_init);
88451- module_free(mod, mod->module_core);
88452+ module_free_exec(mod, mod->module_init_rx);
88453+ module_free_exec(mod, mod->module_core_rx);
88454+ module_free(mod, mod->module_init_rw);
88455+ module_free(mod, mod->module_core_rw);
88456 }
88457
88458 int __weak module_finalize(const Elf_Ehdr *hdr,
88459@@ -2964,7 +3096,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
88460 static int post_relocation(struct module *mod, const struct load_info *info)
88461 {
88462 /* Sort exception table now relocations are done. */
88463+ pax_open_kernel();
88464 sort_extable(mod->extable, mod->extable + mod->num_exentries);
88465+ pax_close_kernel();
88466
88467 /* Copy relocated percpu area over. */
88468 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
88469@@ -3018,16 +3152,16 @@ static int do_init_module(struct module *mod)
88470 MODULE_STATE_COMING, mod);
88471
88472 /* Set RO and NX regions for core */
88473- set_section_ro_nx(mod->module_core,
88474- mod->core_text_size,
88475- mod->core_ro_size,
88476- mod->core_size);
88477+ set_section_ro_nx(mod->module_core_rx,
88478+ mod->core_size_rx,
88479+ mod->core_size_rx,
88480+ mod->core_size_rx);
88481
88482 /* Set RO and NX regions for init */
88483- set_section_ro_nx(mod->module_init,
88484- mod->init_text_size,
88485- mod->init_ro_size,
88486- mod->init_size);
88487+ set_section_ro_nx(mod->module_init_rx,
88488+ mod->init_size_rx,
88489+ mod->init_size_rx,
88490+ mod->init_size_rx);
88491
88492 do_mod_ctors(mod);
88493 /* Start the module */
88494@@ -3088,11 +3222,12 @@ static int do_init_module(struct module *mod)
88495 mod->strtab = mod->core_strtab;
88496 #endif
88497 unset_module_init_ro_nx(mod);
88498- module_free(mod, mod->module_init);
88499- mod->module_init = NULL;
88500- mod->init_size = 0;
88501- mod->init_ro_size = 0;
88502- mod->init_text_size = 0;
88503+ module_free(mod, mod->module_init_rw);
88504+ module_free_exec(mod, mod->module_init_rx);
88505+ mod->module_init_rw = NULL;
88506+ mod->module_init_rx = NULL;
88507+ mod->init_size_rw = 0;
88508+ mod->init_size_rx = 0;
88509 mutex_unlock(&module_mutex);
88510 wake_up_all(&module_wq);
88511
88512@@ -3235,9 +3370,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
88513 if (err)
88514 goto free_unload;
88515
88516+ /* Now copy in args */
88517+ mod->args = strndup_user(uargs, ~0UL >> 1);
88518+ if (IS_ERR(mod->args)) {
88519+ err = PTR_ERR(mod->args);
88520+ goto free_unload;
88521+ }
88522+
88523 /* Set up MODINFO_ATTR fields */
88524 setup_modinfo(mod, info);
88525
88526+#ifdef CONFIG_GRKERNSEC_MODHARDEN
88527+ {
88528+ char *p, *p2;
88529+
88530+ if (strstr(mod->args, "grsec_modharden_netdev")) {
88531+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
88532+ err = -EPERM;
88533+ goto free_modinfo;
88534+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
88535+ p += sizeof("grsec_modharden_normal") - 1;
88536+ p2 = strstr(p, "_");
88537+ if (p2) {
88538+ *p2 = '\0';
88539+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
88540+ *p2 = '_';
88541+ }
88542+ err = -EPERM;
88543+ goto free_modinfo;
88544+ }
88545+ }
88546+#endif
88547+
88548 /* Fix up syms, so that st_value is a pointer to location. */
88549 err = simplify_symbols(mod, info);
88550 if (err < 0)
88551@@ -3253,13 +3417,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
88552
88553 flush_module_icache(mod);
88554
88555- /* Now copy in args */
88556- mod->args = strndup_user(uargs, ~0UL >> 1);
88557- if (IS_ERR(mod->args)) {
88558- err = PTR_ERR(mod->args);
88559- goto free_arch_cleanup;
88560- }
88561-
88562 dynamic_debug_setup(info->debug, info->num_debug);
88563
88564 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
88565@@ -3297,11 +3454,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
88566 ddebug_cleanup:
88567 dynamic_debug_remove(info->debug);
88568 synchronize_sched();
88569- kfree(mod->args);
88570- free_arch_cleanup:
88571 module_arch_cleanup(mod);
88572 free_modinfo:
88573 free_modinfo(mod);
88574+ kfree(mod->args);
88575 free_unload:
88576 module_unload_free(mod);
88577 unlink_mod:
88578@@ -3384,10 +3540,16 @@ static const char *get_ksymbol(struct module *mod,
88579 unsigned long nextval;
88580
88581 /* At worse, next value is at end of module */
88582- if (within_module_init(addr, mod))
88583- nextval = (unsigned long)mod->module_init+mod->init_text_size;
88584+ if (within_module_init_rx(addr, mod))
88585+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
88586+ else if (within_module_init_rw(addr, mod))
88587+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
88588+ else if (within_module_core_rx(addr, mod))
88589+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
88590+ else if (within_module_core_rw(addr, mod))
88591+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
88592 else
88593- nextval = (unsigned long)mod->module_core+mod->core_text_size;
88594+ return NULL;
88595
88596 /* Scan for closest preceding symbol, and next symbol. (ELF
88597 starts real symbols at 1). */
88598@@ -3638,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
88599 return 0;
88600
88601 seq_printf(m, "%s %u",
88602- mod->name, mod->init_size + mod->core_size);
88603+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
88604 print_unload_info(m, mod);
88605
88606 /* Informative for users. */
88607@@ -3647,7 +3809,7 @@ static int m_show(struct seq_file *m, void *p)
88608 mod->state == MODULE_STATE_COMING ? "Loading":
88609 "Live");
88610 /* Used by oprofile and other similar tools. */
88611- seq_printf(m, " 0x%pK", mod->module_core);
88612+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
88613
88614 /* Taints info */
88615 if (mod->taints)
88616@@ -3683,7 +3845,17 @@ static const struct file_operations proc_modules_operations = {
88617
88618 static int __init proc_modules_init(void)
88619 {
88620+#ifndef CONFIG_GRKERNSEC_HIDESYM
88621+#ifdef CONFIG_GRKERNSEC_PROC_USER
88622+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
88623+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88624+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
88625+#else
88626 proc_create("modules", 0, NULL, &proc_modules_operations);
88627+#endif
88628+#else
88629+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
88630+#endif
88631 return 0;
88632 }
88633 module_init(proc_modules_init);
88634@@ -3744,14 +3916,14 @@ struct module *__module_address(unsigned long addr)
88635 {
88636 struct module *mod;
88637
88638- if (addr < module_addr_min || addr > module_addr_max)
88639+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
88640+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
88641 return NULL;
88642
88643 list_for_each_entry_rcu(mod, &modules, list) {
88644 if (mod->state == MODULE_STATE_UNFORMED)
88645 continue;
88646- if (within_module_core(addr, mod)
88647- || within_module_init(addr, mod))
88648+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
88649 return mod;
88650 }
88651 return NULL;
88652@@ -3786,11 +3958,20 @@ bool is_module_text_address(unsigned long addr)
88653 */
88654 struct module *__module_text_address(unsigned long addr)
88655 {
88656- struct module *mod = __module_address(addr);
88657+ struct module *mod;
88658+
88659+#ifdef CONFIG_X86_32
88660+ addr = ktla_ktva(addr);
88661+#endif
88662+
88663+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
88664+ return NULL;
88665+
88666+ mod = __module_address(addr);
88667+
88668 if (mod) {
88669 /* Make sure it's within the text section. */
88670- if (!within(addr, mod->module_init, mod->init_text_size)
88671- && !within(addr, mod->module_core, mod->core_text_size))
88672+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
88673 mod = NULL;
88674 }
88675 return mod;
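
The module.c rework is the largest piece of this section: the single module_core/module_init allocation is split into an RX mapping (text and rodata, from module_alloc_exec) and an RW mapping (data, from module_alloc), each with its own size, bounds tracking, and free path, and every write into the RX side goes through a pax_open_kernel() window. One visible consequence at the end of the file: __module_address() must check both bounds pairs, and /proc/modules prints both base addresses. The classifier that drives the whole layout, extracted from the layout_sections()/move_module() hunks:

    /* A section lands in the RW mapping if it is writable or not allocated
     * at all; everything else -- text and rodata -- lands in the RX mapping. */
    static int section_is_rw(const Elf_Shdr *shdr)
    {
    	return (shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC);
    }
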
88676diff --git a/kernel/notifier.c b/kernel/notifier.c
88677index 2d5cc4c..d9ea600 100644
88678--- a/kernel/notifier.c
88679+++ b/kernel/notifier.c
88680@@ -5,6 +5,7 @@
88681 #include <linux/rcupdate.h>
88682 #include <linux/vmalloc.h>
88683 #include <linux/reboot.h>
88684+#include <linux/mm.h>
88685
88686 /*
88687 * Notifier list for kernel code which wants to be called
88688@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
88689 while ((*nl) != NULL) {
88690 if (n->priority > (*nl)->priority)
88691 break;
88692- nl = &((*nl)->next);
88693+ nl = (struct notifier_block **)&((*nl)->next);
88694 }
88695- n->next = *nl;
88696+ pax_open_kernel();
88697+ *(const void **)&n->next = *nl;
88698 rcu_assign_pointer(*nl, n);
88699+ pax_close_kernel();
88700 return 0;
88701 }
88702
88703@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
88704 return 0;
88705 if (n->priority > (*nl)->priority)
88706 break;
88707- nl = &((*nl)->next);
88708+ nl = (struct notifier_block **)&((*nl)->next);
88709 }
88710- n->next = *nl;
88711+ pax_open_kernel();
88712+ *(const void **)&n->next = *nl;
88713 rcu_assign_pointer(*nl, n);
88714+ pax_close_kernel();
88715 return 0;
88716 }
88717
88718@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
88719 {
88720 while ((*nl) != NULL) {
88721 if ((*nl) == n) {
88722+ pax_open_kernel();
88723 rcu_assign_pointer(*nl, n->next);
88724+ pax_close_kernel();
88725 return 0;
88726 }
88727- nl = &((*nl)->next);
88728+ nl = (struct notifier_block **)&((*nl)->next);
88729 }
88730 return -ENOENT;
88731 }
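
Notifier chains are traversed locklessly under RCU, so the chain nodes themselves are hardened here: ->next becomes effectively const (hence the casts when taking its address) and the only sanctioned writers, register and unregister, do their stores inside a pax_open_kernel() window through a qualifier-stripping cast. The write pattern, isolated:

    /* Every mutation of a chain node is grep-able and bracketed: */
    pax_open_kernel();
    *(const void **)&n->next = *nl;   /* splice the new block in */
    rcu_assign_pointer(*nl, n);       /* publish inside the same window,
                                       * since the head may itself live in
                                       * write-protected memory */
    pax_close_kernel();
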
88732diff --git a/kernel/padata.c b/kernel/padata.c
88733index 161402f..598814c 100644
88734--- a/kernel/padata.c
88735+++ b/kernel/padata.c
88736@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
88737 * seq_nr mod. number of cpus in use.
88738 */
88739
88740- seq_nr = atomic_inc_return(&pd->seq_nr);
88741+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
88742 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
88743
88744 return padata_index_to_cpu(pd, cpu_index);
88745@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
88746 padata_init_pqueues(pd);
88747 padata_init_squeues(pd);
88748 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
88749- atomic_set(&pd->seq_nr, -1);
88750+ atomic_set_unchecked(&pd->seq_nr, -1);
88751 atomic_set(&pd->reorder_objects, 0);
88752 atomic_set(&pd->refcnt, 0);
88753 pd->pinst = pinst;
88754diff --git a/kernel/panic.c b/kernel/panic.c
88755index 6d63003..486a109 100644
88756--- a/kernel/panic.c
88757+++ b/kernel/panic.c
88758@@ -52,7 +52,7 @@ EXPORT_SYMBOL(panic_blink);
88759 /*
88760 * Stop ourself in panic -- architecture code may override this
88761 */
88762-void __weak panic_smp_self_stop(void)
88763+void __weak __noreturn panic_smp_self_stop(void)
88764 {
88765 while (1)
88766 cpu_relax();
88767@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
88768 disable_trace_on_warning();
88769
88770 pr_warn("------------[ cut here ]------------\n");
88771- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
88772+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
88773 raw_smp_processor_id(), current->pid, file, line, caller);
88774
88775 if (args)
88776@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
88777 */
88778 void __stack_chk_fail(void)
88779 {
88780- panic("stack-protector: Kernel stack is corrupted in: %p\n",
88781+ dump_stack();
88782+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
88783 __builtin_return_address(0));
88784 }
88785 EXPORT_SYMBOL(__stack_chk_fail);
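
__stack_chk_fail() is the function gcc's -fstack-protector emits a call to from the epilogue of any protected function whose canary was clobbered; the hunk adds a dump_stack() so the corrupted call chain is logged before the panic, and switches the format to %pA, a printk extension added elsewhere in this patch. The compiler-side mechanism, sketched in plain C (the real check is emitted by the compiler, not written by hand, and the canary sits between locals and the return address):

    extern unsigned long __stack_chk_guard;	/* the canary value */
    void __stack_chk_fail(void);

    void protected_function(void)
    {
    	unsigned long canary = __stack_chk_guard;	/* prologue copy */
    	char buf[64];
    	/* ... if buf overflows, 'canary' is corrupted ... */
    	if (canary != __stack_chk_guard)		/* epilogue check */
    		__stack_chk_fail();			/* never returns */
    }
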
88786diff --git a/kernel/pid.c b/kernel/pid.c
88787index 9b9a266..c20ef80 100644
88788--- a/kernel/pid.c
88789+++ b/kernel/pid.c
88790@@ -33,6 +33,7 @@
88791 #include <linux/rculist.h>
88792 #include <linux/bootmem.h>
88793 #include <linux/hash.h>
88794+#include <linux/security.h>
88795 #include <linux/pid_namespace.h>
88796 #include <linux/init_task.h>
88797 #include <linux/syscalls.h>
88798@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
88799
88800 int pid_max = PID_MAX_DEFAULT;
88801
88802-#define RESERVED_PIDS 300
88803+#define RESERVED_PIDS 500
88804
88805 int pid_max_min = RESERVED_PIDS + 1;
88806 int pid_max_max = PID_MAX_LIMIT;
88807@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
88808 */
88809 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
88810 {
88811+ struct task_struct *task;
88812+
88813 rcu_lockdep_assert(rcu_read_lock_held(),
88814 "find_task_by_pid_ns() needs rcu_read_lock()"
88815 " protection");
88816- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
88817+
88818+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
88819+
88820+ if (gr_pid_is_chrooted(task))
88821+ return NULL;
88822+
88823+ return task;
88824 }
88825
88826 struct task_struct *find_task_by_vpid(pid_t vnr)
88827@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
88828 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
88829 }
88830
88831+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
88832+{
88833+ rcu_lockdep_assert(rcu_read_lock_held(),
88834+ "find_task_by_pid_ns() needs rcu_read_lock()"
88835+ " protection");
88836+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
88837+}
88838+
88839 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
88840 {
88841 struct pid *pid;
88842diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
88843index db95d8e..a0ca23f 100644
88844--- a/kernel/pid_namespace.c
88845+++ b/kernel/pid_namespace.c
88846@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
88847 void __user *buffer, size_t *lenp, loff_t *ppos)
88848 {
88849 struct pid_namespace *pid_ns = task_active_pid_ns(current);
88850- struct ctl_table tmp = *table;
88851+ ctl_table_no_const tmp = *table;
88852
88853 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
88854 return -EPERM;
88855diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
88856index 3b89464..5e38379 100644
88857--- a/kernel/posix-cpu-timers.c
88858+++ b/kernel/posix-cpu-timers.c
88859@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
88860
88861 static __init int init_posix_cpu_timers(void)
88862 {
88863- struct k_clock process = {
88864+ static struct k_clock process = {
88865 .clock_getres = process_cpu_clock_getres,
88866 .clock_get = process_cpu_clock_get,
88867 .timer_create = process_cpu_timer_create,
88868 .nsleep = process_cpu_nsleep,
88869 .nsleep_restart = process_cpu_nsleep_restart,
88870 };
88871- struct k_clock thread = {
88872+ static struct k_clock thread = {
88873 .clock_getres = thread_cpu_clock_getres,
88874 .clock_get = thread_cpu_clock_get,
88875 .timer_create = thread_cpu_timer_create,
88876diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
88877index 424c2d4..679242f 100644
88878--- a/kernel/posix-timers.c
88879+++ b/kernel/posix-timers.c
88880@@ -43,6 +43,7 @@
88881 #include <linux/hash.h>
88882 #include <linux/posix-clock.h>
88883 #include <linux/posix-timers.h>
88884+#include <linux/grsecurity.h>
88885 #include <linux/syscalls.h>
88886 #include <linux/wait.h>
88887 #include <linux/workqueue.h>
88888@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
88889 * which we beg off on and pass to do_sys_settimeofday().
88890 */
88891
88892-static struct k_clock posix_clocks[MAX_CLOCKS];
88893+static struct k_clock *posix_clocks[MAX_CLOCKS];
88894
88895 /*
88896 * These ones are defined below.
88897@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
88898 */
88899 static __init int init_posix_timers(void)
88900 {
88901- struct k_clock clock_realtime = {
88902+ static struct k_clock clock_realtime = {
88903 .clock_getres = hrtimer_get_res,
88904 .clock_get = posix_clock_realtime_get,
88905 .clock_set = posix_clock_realtime_set,
88906@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
88907 .timer_get = common_timer_get,
88908 .timer_del = common_timer_del,
88909 };
88910- struct k_clock clock_monotonic = {
88911+ static struct k_clock clock_monotonic = {
88912 .clock_getres = hrtimer_get_res,
88913 .clock_get = posix_ktime_get_ts,
88914 .nsleep = common_nsleep,
88915@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
88916 .timer_get = common_timer_get,
88917 .timer_del = common_timer_del,
88918 };
88919- struct k_clock clock_monotonic_raw = {
88920+ static struct k_clock clock_monotonic_raw = {
88921 .clock_getres = hrtimer_get_res,
88922 .clock_get = posix_get_monotonic_raw,
88923 };
88924- struct k_clock clock_realtime_coarse = {
88925+ static struct k_clock clock_realtime_coarse = {
88926 .clock_getres = posix_get_coarse_res,
88927 .clock_get = posix_get_realtime_coarse,
88928 };
88929- struct k_clock clock_monotonic_coarse = {
88930+ static struct k_clock clock_monotonic_coarse = {
88931 .clock_getres = posix_get_coarse_res,
88932 .clock_get = posix_get_monotonic_coarse,
88933 };
88934- struct k_clock clock_tai = {
88935+ static struct k_clock clock_tai = {
88936 .clock_getres = hrtimer_get_res,
88937 .clock_get = posix_get_tai,
88938 .nsleep = common_nsleep,
88939@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
88940 .timer_get = common_timer_get,
88941 .timer_del = common_timer_del,
88942 };
88943- struct k_clock clock_boottime = {
88944+ static struct k_clock clock_boottime = {
88945 .clock_getres = hrtimer_get_res,
88946 .clock_get = posix_get_boottime,
88947 .nsleep = common_nsleep,
88948@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
88949 return;
88950 }
88951
88952- posix_clocks[clock_id] = *new_clock;
88953+ posix_clocks[clock_id] = new_clock;
88954 }
88955 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
88956
88957@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
88958 return (id & CLOCKFD_MASK) == CLOCKFD ?
88959 &clock_posix_dynamic : &clock_posix_cpu;
88960
88961- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
88962+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
88963 return NULL;
88964- return &posix_clocks[id];
88965+ return posix_clocks[id];
88966 }
88967
88968 static int common_timer_create(struct k_itimer *new_timer)
88969@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
88970 struct k_clock *kc = clockid_to_kclock(which_clock);
88971 struct k_itimer *new_timer;
88972 int error, new_timer_id;
88973- sigevent_t event;
88974+ sigevent_t event = { };
88975 int it_id_set = IT_ID_NOT_SET;
88976
88977 if (!kc)
88978@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
88979 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
88980 return -EFAULT;
88981
88982+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
88983+ have their clock_set fptr set to a nosettime dummy function.
88984+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
88985+ call common_clock_set, which calls do_sys_settimeofday, which
88986+ we hook.
88987+ */
88988+
88989 return kc->clock_set(which_clock, &new_tp);
88990 }
88991
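
The posix_clocks change is the patch's usual constification pattern: a
table of structures becomes a table of pointers, so each registered
k_clock can be declared static (and end up in read-only data), and the
lookup gains a NULL test for unregistered slots. A reduced sketch of
the pattern, with hypothetical names (k_ops, lookup, register_ops are
not kernel API):

	struct k_ops {
		int (*getres)(int id);
	};

	/* was: static struct k_ops ops_table[16]; */
	static struct k_ops *ops_table[16];

	static struct k_ops *lookup(int id)
	{
		/* an empty slot is now a NULL pointer, not a zeroed struct */
		if (id < 0 || id >= 16 || !ops_table[id] || !ops_table[id]->getres)
			return NULL;
		return ops_table[id];
	}

	static void register_ops(int id, struct k_ops *ops)
	{
		ops_table[id] = ops;	/* store the pointer, no struct copy */
	}
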
88992diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
88993index 2fac9cc..56fef29 100644
88994--- a/kernel/power/Kconfig
88995+++ b/kernel/power/Kconfig
88996@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
88997 config HIBERNATION
88998 bool "Hibernation (aka 'suspend to disk')"
88999 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
89000+ depends on !GRKERNSEC_KMEM
89001+ depends on !PAX_MEMORY_SANITIZE
89002 select HIBERNATE_CALLBACKS
89003 select LZO_COMPRESS
89004 select LZO_DECOMPRESS
89005diff --git a/kernel/power/process.c b/kernel/power/process.c
89006index 06ec886..9dba35e 100644
89007--- a/kernel/power/process.c
89008+++ b/kernel/power/process.c
89009@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
89010 unsigned int elapsed_msecs;
89011 bool wakeup = false;
89012 int sleep_usecs = USEC_PER_MSEC;
89013+ bool timedout = false;
89014
89015 do_gettimeofday(&start);
89016
89017@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
89018
89019 while (true) {
89020 todo = 0;
89021+ if (time_after(jiffies, end_time))
89022+ timedout = true;
89023 read_lock(&tasklist_lock);
89024 do_each_thread(g, p) {
89025 if (p == current || !freeze_task(p))
89026 continue;
89027
89028- if (!freezer_should_skip(p))
89029+ if (!freezer_should_skip(p)) {
89030 todo++;
89031+ if (timedout) {
89032+ printk(KERN_ERR "Task refusing to freeze:\n");
89033+ sched_show_task(p);
89034+ }
89035+ }
89036 } while_each_thread(g, p);
89037 read_unlock(&tasklist_lock);
89038
89039@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
89040 todo += wq_busy;
89041 }
89042
89043- if (!todo || time_after(jiffies, end_time))
89044+ if (!todo || timedout)
89045 break;
89046
89047 if (pm_wakeup_pending()) {
89048diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
89049index 4dae9cb..039ffbb 100644
89050--- a/kernel/printk/printk.c
89051+++ b/kernel/printk/printk.c
89052@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
89053 if (from_file && type != SYSLOG_ACTION_OPEN)
89054 return 0;
89055
89056+#ifdef CONFIG_GRKERNSEC_DMESG
89057+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
89058+ return -EPERM;
89059+#endif
89060+
89061 if (syslog_action_restricted(type)) {
89062 if (capable(CAP_SYSLOG))
89063 return 0;
89064diff --git a/kernel/profile.c b/kernel/profile.c
89065index ebdd9c1..612ee05 100644
89066--- a/kernel/profile.c
89067+++ b/kernel/profile.c
89068@@ -37,7 +37,7 @@ struct profile_hit {
89069 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
89070 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
89071
89072-static atomic_t *prof_buffer;
89073+static atomic_unchecked_t *prof_buffer;
89074 static unsigned long prof_len, prof_shift;
89075
89076 int prof_on __read_mostly;
89077@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
89078 hits[i].pc = 0;
89079 continue;
89080 }
89081- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
89082+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
89083 hits[i].hits = hits[i].pc = 0;
89084 }
89085 }
89086@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
89087 * Add the current hit(s) and flush the write-queue out
89088 * to the global buffer:
89089 */
89090- atomic_add(nr_hits, &prof_buffer[pc]);
89091+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
89092 for (i = 0; i < NR_PROFILE_HIT; ++i) {
89093- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
89094+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
89095 hits[i].pc = hits[i].hits = 0;
89096 }
89097 out:
89098@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
89099 {
89100 unsigned long pc;
89101 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
89102- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
89103+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
89104 }
89105 #endif /* !CONFIG_SMP */
89106
89107@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
89108 return -EFAULT;
89109 buf++; p++; count--; read++;
89110 }
89111- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
89112+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
89113 if (copy_to_user(buf, (void *)pnt, count))
89114 return -EFAULT;
89115 read += count;
89116@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
89117 }
89118 #endif
89119 profile_discard_flip_buffers();
89120- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
89121+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
89122 return count;
89123 }
89124
89125diff --git a/kernel/ptrace.c b/kernel/ptrace.c
89126index 1f4bcb3..99cf7ab 100644
89127--- a/kernel/ptrace.c
89128+++ b/kernel/ptrace.c
89129@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
89130 if (seize)
89131 flags |= PT_SEIZED;
89132 rcu_read_lock();
89133- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
89134+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
89135 flags |= PT_PTRACE_CAP;
89136 rcu_read_unlock();
89137 task->ptrace = flags;
89138@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
89139 break;
89140 return -EIO;
89141 }
89142- if (copy_to_user(dst, buf, retval))
89143+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
89144 return -EFAULT;
89145 copied += retval;
89146 src += retval;
89147@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
89148 bool seized = child->ptrace & PT_SEIZED;
89149 int ret = -EIO;
89150 siginfo_t siginfo, *si;
89151- void __user *datavp = (void __user *) data;
89152+ void __user *datavp = (__force void __user *) data;
89153 unsigned long __user *datalp = datavp;
89154 unsigned long flags;
89155
89156@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
89157 goto out;
89158 }
89159
89160+ if (gr_handle_ptrace(child, request)) {
89161+ ret = -EPERM;
89162+ goto out_put_task_struct;
89163+ }
89164+
89165 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
89166 ret = ptrace_attach(child, request, addr, data);
89167 /*
89168 * Some architectures need to do book-keeping after
89169 * a ptrace attach.
89170 */
89171- if (!ret)
89172+ if (!ret) {
89173 arch_ptrace_attach(child);
89174+ gr_audit_ptrace(child);
89175+ }
89176 goto out_put_task_struct;
89177 }
89178
89179@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
89180 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
89181 if (copied != sizeof(tmp))
89182 return -EIO;
89183- return put_user(tmp, (unsigned long __user *)data);
89184+ return put_user(tmp, (__force unsigned long __user *)data);
89185 }
89186
89187 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
89188@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
89189 }
89190
89191 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
89192- compat_long_t addr, compat_long_t data)
89193+ compat_ulong_t addr, compat_ulong_t data)
89194 {
89195 struct task_struct *child;
89196 long ret;
89197@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
89198 goto out;
89199 }
89200
89201+ if (gr_handle_ptrace(child, request)) {
89202+ ret = -EPERM;
89203+ goto out_put_task_struct;
89204+ }
89205+
89206 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
89207 ret = ptrace_attach(child, request, addr, data);
89208 /*
89209 * Some architectures need to do book-keeping after
89210 * a ptrace attach.
89211 */
89212- if (!ret)
89213+ if (!ret) {
89214 arch_ptrace_attach(child);
89215+ gr_audit_ptrace(child);
89216+ }
89217 goto out_put_task_struct;
89218 }
89219
89220diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
89221index 3318d82..1a5b2d1 100644
89222--- a/kernel/rcu/srcu.c
89223+++ b/kernel/rcu/srcu.c
89224@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
89225
89226 idx = ACCESS_ONCE(sp->completed) & 0x1;
89227 preempt_disable();
89228- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
89229+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
89230 smp_mb(); /* B */ /* Avoid leaking the critical section. */
89231- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
89232+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
89233 preempt_enable();
89234 return idx;
89235 }
89236diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
89237index 1254f31..16258dc 100644
89238--- a/kernel/rcu/tiny.c
89239+++ b/kernel/rcu/tiny.c
89240@@ -46,7 +46,7 @@
89241 /* Forward declarations for tiny_plugin.h. */
89242 struct rcu_ctrlblk;
89243 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
89244-static void rcu_process_callbacks(struct softirq_action *unused);
89245+static void rcu_process_callbacks(void);
89246 static void __call_rcu(struct rcu_head *head,
89247 void (*func)(struct rcu_head *rcu),
89248 struct rcu_ctrlblk *rcp);
89249@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
89250 false));
89251 }
89252
89253-static void rcu_process_callbacks(struct softirq_action *unused)
89254+static __latent_entropy void rcu_process_callbacks(void)
89255 {
89256 __rcu_process_callbacks(&rcu_sched_ctrlblk);
89257 __rcu_process_callbacks(&rcu_bh_ctrlblk);
89258diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
89259index 732f8ae..42c1919 100644
89260--- a/kernel/rcu/torture.c
89261+++ b/kernel/rcu/torture.c
89262@@ -174,12 +174,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
89263 { 0 };
89264 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
89265 { 0 };
89266-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
89267-static atomic_t n_rcu_torture_alloc;
89268-static atomic_t n_rcu_torture_alloc_fail;
89269-static atomic_t n_rcu_torture_free;
89270-static atomic_t n_rcu_torture_mberror;
89271-static atomic_t n_rcu_torture_error;
89272+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
89273+static atomic_unchecked_t n_rcu_torture_alloc;
89274+static atomic_unchecked_t n_rcu_torture_alloc_fail;
89275+static atomic_unchecked_t n_rcu_torture_free;
89276+static atomic_unchecked_t n_rcu_torture_mberror;
89277+static atomic_unchecked_t n_rcu_torture_error;
89278 static long n_rcu_torture_barrier_error;
89279 static long n_rcu_torture_boost_ktrerror;
89280 static long n_rcu_torture_boost_rterror;
89281@@ -297,11 +297,11 @@ rcu_torture_alloc(void)
89282
89283 spin_lock_bh(&rcu_torture_lock);
89284 if (list_empty(&rcu_torture_freelist)) {
89285- atomic_inc(&n_rcu_torture_alloc_fail);
89286+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
89287 spin_unlock_bh(&rcu_torture_lock);
89288 return NULL;
89289 }
89290- atomic_inc(&n_rcu_torture_alloc);
89291+ atomic_inc_unchecked(&n_rcu_torture_alloc);
89292 p = rcu_torture_freelist.next;
89293 list_del_init(p);
89294 spin_unlock_bh(&rcu_torture_lock);
89295@@ -314,7 +314,7 @@ rcu_torture_alloc(void)
89296 static void
89297 rcu_torture_free(struct rcu_torture *p)
89298 {
89299- atomic_inc(&n_rcu_torture_free);
89300+ atomic_inc_unchecked(&n_rcu_torture_free);
89301 spin_lock_bh(&rcu_torture_lock);
89302 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
89303 spin_unlock_bh(&rcu_torture_lock);
89304@@ -435,7 +435,7 @@ rcu_torture_cb(struct rcu_head *p)
89305 i = rp->rtort_pipe_count;
89306 if (i > RCU_TORTURE_PIPE_LEN)
89307 i = RCU_TORTURE_PIPE_LEN;
89308- atomic_inc(&rcu_torture_wcount[i]);
89309+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
89310 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
89311 rp->rtort_mbtest = 0;
89312 rcu_torture_free(rp);
89313@@ -823,7 +823,7 @@ rcu_torture_writer(void *arg)
89314 i = old_rp->rtort_pipe_count;
89315 if (i > RCU_TORTURE_PIPE_LEN)
89316 i = RCU_TORTURE_PIPE_LEN;
89317- atomic_inc(&rcu_torture_wcount[i]);
89318+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
89319 old_rp->rtort_pipe_count++;
89320 if (gp_normal == gp_exp)
89321 exp = !!(rcu_random(&rand) & 0x80);
89322@@ -841,7 +841,7 @@ rcu_torture_writer(void *arg)
89323 i = rp->rtort_pipe_count;
89324 if (i > RCU_TORTURE_PIPE_LEN)
89325 i = RCU_TORTURE_PIPE_LEN;
89326- atomic_inc(&rcu_torture_wcount[i]);
89327+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
89328 if (++rp->rtort_pipe_count >=
89329 RCU_TORTURE_PIPE_LEN) {
89330 rp->rtort_mbtest = 0;
89331@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
89332 return;
89333 }
89334 if (p->rtort_mbtest == 0)
89335- atomic_inc(&n_rcu_torture_mberror);
89336+ atomic_inc_unchecked(&n_rcu_torture_mberror);
89337 spin_lock(&rand_lock);
89338 cur_ops->read_delay(&rand);
89339 n_rcu_torture_timers++;
89340@@ -1010,7 +1010,7 @@ rcu_torture_reader(void *arg)
89341 continue;
89342 }
89343 if (p->rtort_mbtest == 0)
89344- atomic_inc(&n_rcu_torture_mberror);
89345+ atomic_inc_unchecked(&n_rcu_torture_mberror);
89346 cur_ops->read_delay(&rand);
89347 preempt_disable();
89348 pipe_count = p->rtort_pipe_count;
89349@@ -1068,15 +1068,15 @@ rcu_torture_printk(char *page)
89350 }
89351 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
89352 page += sprintf(page,
89353- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
89354+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
89355 rcu_torture_current,
89356 rcu_torture_current_version,
89357 list_empty(&rcu_torture_freelist),
89358- atomic_read(&n_rcu_torture_alloc),
89359- atomic_read(&n_rcu_torture_alloc_fail),
89360- atomic_read(&n_rcu_torture_free));
89361+ atomic_read_unchecked(&n_rcu_torture_alloc),
89362+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
89363+ atomic_read_unchecked(&n_rcu_torture_free));
89364 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
89365- atomic_read(&n_rcu_torture_mberror),
89366+ atomic_read_unchecked(&n_rcu_torture_mberror),
89367 n_rcu_torture_boost_ktrerror,
89368 n_rcu_torture_boost_rterror);
89369 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
89370@@ -1095,14 +1095,14 @@ rcu_torture_printk(char *page)
89371 n_barrier_attempts,
89372 n_rcu_torture_barrier_error);
89373 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
89374- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
89375+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
89376 n_rcu_torture_barrier_error != 0 ||
89377 n_rcu_torture_boost_ktrerror != 0 ||
89378 n_rcu_torture_boost_rterror != 0 ||
89379 n_rcu_torture_boost_failure != 0 ||
89380 i > 1) {
89381 page += sprintf(page, "!!! ");
89382- atomic_inc(&n_rcu_torture_error);
89383+ atomic_inc_unchecked(&n_rcu_torture_error);
89384 WARN_ON_ONCE(1);
89385 }
89386 page += sprintf(page, "Reader Pipe: ");
89387@@ -1116,7 +1116,7 @@ rcu_torture_printk(char *page)
89388 page += sprintf(page, "Free-Block Circulation: ");
89389 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
89390 page += sprintf(page, " %d",
89391- atomic_read(&rcu_torture_wcount[i]));
89392+ atomic_read_unchecked(&rcu_torture_wcount[i]));
89393 }
89394 page += sprintf(page, "\n");
89395 if (cur_ops->stats)
89396@@ -1839,7 +1839,7 @@ rcu_torture_cleanup(void)
89397
89398 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
89399
89400- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
89401+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
89402 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
89403 else if (n_online_successes != n_online_attempts ||
89404 n_offline_successes != n_offline_attempts)
89405@@ -1961,18 +1961,18 @@ rcu_torture_init(void)
89406
89407 rcu_torture_current = NULL;
89408 rcu_torture_current_version = 0;
89409- atomic_set(&n_rcu_torture_alloc, 0);
89410- atomic_set(&n_rcu_torture_alloc_fail, 0);
89411- atomic_set(&n_rcu_torture_free, 0);
89412- atomic_set(&n_rcu_torture_mberror, 0);
89413- atomic_set(&n_rcu_torture_error, 0);
89414+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
89415+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
89416+ atomic_set_unchecked(&n_rcu_torture_free, 0);
89417+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
89418+ atomic_set_unchecked(&n_rcu_torture_error, 0);
89419 n_rcu_torture_barrier_error = 0;
89420 n_rcu_torture_boost_ktrerror = 0;
89421 n_rcu_torture_boost_rterror = 0;
89422 n_rcu_torture_boost_failure = 0;
89423 n_rcu_torture_boosts = 0;
89424 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
89425- atomic_set(&rcu_torture_wcount[i], 0);
89426+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
89427 for_each_possible_cpu(cpu) {
89428 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
89429 per_cpu(rcu_torture_count, cpu)[i] = 0;
89430diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
89431index b3d116c..ebf6598 100644
89432--- a/kernel/rcu/tree.c
89433+++ b/kernel/rcu/tree.c
89434@@ -390,9 +390,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
89435 rcu_prepare_for_idle(smp_processor_id());
89436 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
89437 smp_mb__before_atomic_inc(); /* See above. */
89438- atomic_inc(&rdtp->dynticks);
89439+ atomic_inc_unchecked(&rdtp->dynticks);
89440 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
89441- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
89442+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
89443
89444 /*
89445 * It is illegal to enter an extended quiescent state while
89446@@ -510,10 +510,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
89447 int user)
89448 {
89449 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
89450- atomic_inc(&rdtp->dynticks);
89451+ atomic_inc_unchecked(&rdtp->dynticks);
89452 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
89453 smp_mb__after_atomic_inc(); /* See above. */
89454- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
89455+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
89456 rcu_cleanup_after_idle(smp_processor_id());
89457 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
89458 if (!user && !is_idle_task(current)) {
89459@@ -634,14 +634,14 @@ void rcu_nmi_enter(void)
89460 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
89461
89462 if (rdtp->dynticks_nmi_nesting == 0 &&
89463- (atomic_read(&rdtp->dynticks) & 0x1))
89464+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
89465 return;
89466 rdtp->dynticks_nmi_nesting++;
89467 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
89468- atomic_inc(&rdtp->dynticks);
89469+ atomic_inc_unchecked(&rdtp->dynticks);
89470 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
89471 smp_mb__after_atomic_inc(); /* See above. */
89472- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
89473+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
89474 }
89475
89476 /**
89477@@ -660,9 +660,9 @@ void rcu_nmi_exit(void)
89478 return;
89479 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
89480 smp_mb__before_atomic_inc(); /* See above. */
89481- atomic_inc(&rdtp->dynticks);
89482+ atomic_inc_unchecked(&rdtp->dynticks);
89483 smp_mb__after_atomic_inc(); /* Force delay to next write. */
89484- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
89485+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
89486 }
89487
89488 /**
89489@@ -675,7 +675,7 @@ void rcu_nmi_exit(void)
89490 */
89491 bool notrace __rcu_is_watching(void)
89492 {
89493- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
89494+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
89495 }
89496
89497 /**
89498@@ -758,7 +758,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
89499 static int dyntick_save_progress_counter(struct rcu_data *rdp,
89500 bool *isidle, unsigned long *maxj)
89501 {
89502- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
89503+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
89504 rcu_sysidle_check_cpu(rdp, isidle, maxj);
89505 return (rdp->dynticks_snap & 0x1) == 0;
89506 }
89507@@ -781,7 +781,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
89508 unsigned int curr;
89509 unsigned int snap;
89510
89511- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
89512+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
89513 snap = (unsigned int)rdp->dynticks_snap;
89514
89515 /*
89516@@ -1450,9 +1450,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
89517 rdp = this_cpu_ptr(rsp->rda);
89518 rcu_preempt_check_blocked_tasks(rnp);
89519 rnp->qsmask = rnp->qsmaskinit;
89520- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
89521+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
89522 WARN_ON_ONCE(rnp->completed != rsp->completed);
89523- ACCESS_ONCE(rnp->completed) = rsp->completed;
89524+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
89525 if (rnp == rdp->mynode)
89526 __note_gp_changes(rsp, rnp, rdp);
89527 rcu_preempt_boost_start_gp(rnp);
89528@@ -1546,7 +1546,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
89529 rcu_for_each_node_breadth_first(rsp, rnp) {
89530 raw_spin_lock_irq(&rnp->lock);
89531 smp_mb__after_unlock_lock();
89532- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
89533+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
89534 rdp = this_cpu_ptr(rsp->rda);
89535 if (rnp == rdp->mynode)
89536 __note_gp_changes(rsp, rnp, rdp);
89537@@ -1912,7 +1912,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
89538 rsp->qlen += rdp->qlen;
89539 rdp->n_cbs_orphaned += rdp->qlen;
89540 rdp->qlen_lazy = 0;
89541- ACCESS_ONCE(rdp->qlen) = 0;
89542+ ACCESS_ONCE_RW(rdp->qlen) = 0;
89543 }
89544
89545 /*
89546@@ -2159,7 +2159,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
89547 }
89548 smp_mb(); /* List handling before counting for rcu_barrier(). */
89549 rdp->qlen_lazy -= count_lazy;
89550- ACCESS_ONCE(rdp->qlen) -= count;
89551+ ACCESS_ONCE_RW(rdp->qlen) -= count;
89552 rdp->n_cbs_invoked += count;
89553
89554 /* Reinstate batch limit if we have worked down the excess. */
89555@@ -2362,7 +2362,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
89556 /*
89557 * Do RCU core processing for the current CPU.
89558 */
89559-static void rcu_process_callbacks(struct softirq_action *unused)
89560+static void rcu_process_callbacks(void)
89561 {
89562 struct rcu_state *rsp;
89563
89564@@ -2470,7 +2470,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
89565 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
89566 if (debug_rcu_head_queue(head)) {
89567 /* Probable double call_rcu(), so leak the callback. */
89568- ACCESS_ONCE(head->func) = rcu_leak_callback;
89569+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
89570 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
89571 return;
89572 }
89573@@ -2498,7 +2498,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
89574 local_irq_restore(flags);
89575 return;
89576 }
89577- ACCESS_ONCE(rdp->qlen)++;
89578+ ACCESS_ONCE_RW(rdp->qlen)++;
89579 if (lazy)
89580 rdp->qlen_lazy++;
89581 else
89582@@ -2707,11 +2707,11 @@ void synchronize_sched_expedited(void)
89583 * counter wrap on a 32-bit system. Quite a few more CPUs would of
89584 * course be required on a 64-bit system.
89585 */
89586- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
89587+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
89588 (ulong)atomic_long_read(&rsp->expedited_done) +
89589 ULONG_MAX / 8)) {
89590 synchronize_sched();
89591- atomic_long_inc(&rsp->expedited_wrap);
89592+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
89593 return;
89594 }
89595
89596@@ -2719,7 +2719,7 @@ void synchronize_sched_expedited(void)
89597 * Take a ticket. Note that atomic_inc_return() implies a
89598 * full memory barrier.
89599 */
89600- snap = atomic_long_inc_return(&rsp->expedited_start);
89601+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
89602 firstsnap = snap;
89603 get_online_cpus();
89604 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
89605@@ -2732,14 +2732,14 @@ void synchronize_sched_expedited(void)
89606 synchronize_sched_expedited_cpu_stop,
89607 NULL) == -EAGAIN) {
89608 put_online_cpus();
89609- atomic_long_inc(&rsp->expedited_tryfail);
89610+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
89611
89612 /* Check to see if someone else did our work for us. */
89613 s = atomic_long_read(&rsp->expedited_done);
89614 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
89615 /* ensure test happens before caller kfree */
89616 smp_mb__before_atomic_inc(); /* ^^^ */
89617- atomic_long_inc(&rsp->expedited_workdone1);
89618+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
89619 return;
89620 }
89621
89622@@ -2748,7 +2748,7 @@ void synchronize_sched_expedited(void)
89623 udelay(trycount * num_online_cpus());
89624 } else {
89625 wait_rcu_gp(call_rcu_sched);
89626- atomic_long_inc(&rsp->expedited_normal);
89627+ atomic_long_inc_unchecked(&rsp->expedited_normal);
89628 return;
89629 }
89630
89631@@ -2757,7 +2757,7 @@ void synchronize_sched_expedited(void)
89632 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
89633 /* ensure test happens before caller kfree */
89634 smp_mb__before_atomic_inc(); /* ^^^ */
89635- atomic_long_inc(&rsp->expedited_workdone2);
89636+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
89637 return;
89638 }
89639
89640@@ -2769,10 +2769,10 @@ void synchronize_sched_expedited(void)
89641 * period works for us.
89642 */
89643 get_online_cpus();
89644- snap = atomic_long_read(&rsp->expedited_start);
89645+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
89646 smp_mb(); /* ensure read is before try_stop_cpus(). */
89647 }
89648- atomic_long_inc(&rsp->expedited_stoppedcpus);
89649+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
89650
89651 /*
89652 * Everyone up to our most recent fetch is covered by our grace
89653@@ -2781,16 +2781,16 @@ void synchronize_sched_expedited(void)
89654 * than we did already did their update.
89655 */
89656 do {
89657- atomic_long_inc(&rsp->expedited_done_tries);
89658+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
89659 s = atomic_long_read(&rsp->expedited_done);
89660 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
89661 /* ensure test happens before caller kfree */
89662 smp_mb__before_atomic_inc(); /* ^^^ */
89663- atomic_long_inc(&rsp->expedited_done_lost);
89664+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
89665 break;
89666 }
89667 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
89668- atomic_long_inc(&rsp->expedited_done_exit);
89669+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
89670
89671 put_online_cpus();
89672 }
89673@@ -2996,7 +2996,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
89674 * ACCESS_ONCE() to prevent the compiler from speculating
89675 * the increment to precede the early-exit check.
89676 */
89677- ACCESS_ONCE(rsp->n_barrier_done)++;
89678+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
89679 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
89680 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
89681 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
89682@@ -3046,7 +3046,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
89683
89684 /* Increment ->n_barrier_done to prevent duplicate work. */
89685 smp_mb(); /* Keep increment after above mechanism. */
89686- ACCESS_ONCE(rsp->n_barrier_done)++;
89687+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
89688 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
89689 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
89690 smp_mb(); /* Keep increment before caller's subsequent code. */
89691@@ -3091,10 +3091,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
89692 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
89693 init_callback_list(rdp);
89694 rdp->qlen_lazy = 0;
89695- ACCESS_ONCE(rdp->qlen) = 0;
89696+ ACCESS_ONCE_RW(rdp->qlen) = 0;
89697 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
89698 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
89699- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
89700+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
89701 rdp->cpu = cpu;
89702 rdp->rsp = rsp;
89703 rcu_boot_init_nocb_percpu_data(rdp);
89704@@ -3128,8 +3128,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
89705 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
89706 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
89707 rcu_sysidle_init_percpu_data(rdp->dynticks);
89708- atomic_set(&rdp->dynticks->dynticks,
89709- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
89710+ atomic_set_unchecked(&rdp->dynticks->dynticks,
89711+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
89712 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
89713
89714 /* Add CPU to rcu_node bitmasks. */
89715diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
89716index 8c19873..bf83c57 100644
89717--- a/kernel/rcu/tree.h
89718+++ b/kernel/rcu/tree.h
89719@@ -87,11 +87,11 @@ struct rcu_dynticks {
89720 long long dynticks_nesting; /* Track irq/process nesting level. */
89721 /* Process level is worth LLONG_MAX/2. */
89722 int dynticks_nmi_nesting; /* Track NMI nesting level. */
89723- atomic_t dynticks; /* Even value for idle, else odd. */
89724+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
89725 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
89726 long long dynticks_idle_nesting;
89727 /* irq/process nesting level from idle. */
89728- atomic_t dynticks_idle; /* Even value for idle, else odd. */
89729+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
89730 /* "Idle" excludes userspace execution. */
89731 unsigned long dynticks_idle_jiffies;
89732 /* End of last non-NMI non-idle period. */
89733@@ -431,17 +431,17 @@ struct rcu_state {
89734 /* _rcu_barrier(). */
89735 /* End of fields guarded by barrier_mutex. */
89736
89737- atomic_long_t expedited_start; /* Starting ticket. */
89738- atomic_long_t expedited_done; /* Done ticket. */
89739- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
89740- atomic_long_t expedited_tryfail; /* # acquisition failures. */
89741- atomic_long_t expedited_workdone1; /* # done by others #1. */
89742- atomic_long_t expedited_workdone2; /* # done by others #2. */
89743- atomic_long_t expedited_normal; /* # fallbacks to normal. */
89744- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
89745- atomic_long_t expedited_done_tries; /* # tries to update _done. */
89746- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
89747- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
89748+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
89749+ atomic_long_t expedited_done; /* Done ticket. */
89750+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
89751+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
89752+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
89753+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
89754+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
89755+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
89756+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
89757+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
89758+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
89759
89760 unsigned long jiffies_force_qs; /* Time at which to invoke */
89761 /* force_quiescent_state(). */
89762diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
89763index 6e2ef4b..c15df94 100644
89764--- a/kernel/rcu/tree_plugin.h
89765+++ b/kernel/rcu/tree_plugin.h
89766@@ -758,7 +758,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
89767 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
89768 {
89769 return !rcu_preempted_readers_exp(rnp) &&
89770- ACCESS_ONCE(rnp->expmask) == 0;
89771+ ACCESS_ONCE_RW(rnp->expmask) == 0;
89772 }
89773
89774 /*
89775@@ -920,7 +920,7 @@ void synchronize_rcu_expedited(void)
89776
89777 /* Clean up and exit. */
89778 smp_mb(); /* ensure expedited GP seen before counter increment. */
89779- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
89780+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
89781 unlock_mb_ret:
89782 mutex_unlock(&sync_rcu_preempt_exp_mutex);
89783 mb_ret:
89784@@ -1496,7 +1496,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
89785 free_cpumask_var(cm);
89786 }
89787
89788-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
89789+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
89790 .store = &rcu_cpu_kthread_task,
89791 .thread_should_run = rcu_cpu_kthread_should_run,
89792 .thread_fn = rcu_cpu_kthread,
89793@@ -1965,7 +1965,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
89794 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
89795 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
89796 cpu, ticks_value, ticks_title,
89797- atomic_read(&rdtp->dynticks) & 0xfff,
89798+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
89799 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
89800 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
89801 fast_no_hz);
89802@@ -2129,7 +2129,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
89803
89804 /* Enqueue the callback on the nocb list and update counts. */
89805 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
89806- ACCESS_ONCE(*old_rhpp) = rhp;
89807+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
89808 atomic_long_add(rhcount, &rdp->nocb_q_count);
89809 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
89810
89811@@ -2302,12 +2302,12 @@ static int rcu_nocb_kthread(void *arg)
89812 * Extract queued callbacks, update counts, and wait
89813 * for a grace period to elapse.
89814 */
89815- ACCESS_ONCE(rdp->nocb_head) = NULL;
89816+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
89817 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
89818 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
89819 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
89820- ACCESS_ONCE(rdp->nocb_p_count) += c;
89821- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
89822+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
89823+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
89824 rcu_nocb_wait_gp(rdp);
89825
89826 /* Each pass through the following loop invokes a callback. */
89827@@ -2333,8 +2333,8 @@ static int rcu_nocb_kthread(void *arg)
89828 list = next;
89829 }
89830 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
89831- ACCESS_ONCE(rdp->nocb_p_count) -= c;
89832- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
89833+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
89834+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
89835 rdp->n_nocbs_invoked += c;
89836 }
89837 return 0;
89838@@ -2351,7 +2351,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
89839 {
89840 if (!rcu_nocb_need_deferred_wakeup(rdp))
89841 return;
89842- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
89843+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
89844 wake_up(&rdp->nocb_wq);
89845 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
89846 }
89847@@ -2377,7 +2377,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
89848 t = kthread_run(rcu_nocb_kthread, rdp,
89849 "rcuo%c/%d", rsp->abbr, cpu);
89850 BUG_ON(IS_ERR(t));
89851- ACCESS_ONCE(rdp->nocb_kthread) = t;
89852+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
89853 }
89854 }
89855
89856@@ -2513,11 +2513,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
89857
89858 /* Record start of fully idle period. */
89859 j = jiffies;
89860- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
89861+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
89862 smp_mb__before_atomic_inc();
89863- atomic_inc(&rdtp->dynticks_idle);
89864+ atomic_inc_unchecked(&rdtp->dynticks_idle);
89865 smp_mb__after_atomic_inc();
89866- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
89867+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
89868 }
89869
89870 /*
89871@@ -2582,9 +2582,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
89872
89873 /* Record end of idle period. */
89874 smp_mb__before_atomic_inc();
89875- atomic_inc(&rdtp->dynticks_idle);
89876+ atomic_inc_unchecked(&rdtp->dynticks_idle);
89877 smp_mb__after_atomic_inc();
89878- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
89879+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
89880
89881 /*
89882 * If we are the timekeeping CPU, we are permitted to be non-idle
89883@@ -2625,7 +2625,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
89884 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
89885
89886 /* Pick up current idle and NMI-nesting counter and check. */
89887- cur = atomic_read(&rdtp->dynticks_idle);
89888+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
89889 if (cur & 0x1) {
89890 *isidle = false; /* We are not idle! */
89891 return;
89892@@ -2688,7 +2688,7 @@ static void rcu_sysidle(unsigned long j)
89893 case RCU_SYSIDLE_NOT:
89894
89895 /* First time all are idle, so note a short idle period. */
89896- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
89897+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
89898 break;
89899
89900 case RCU_SYSIDLE_SHORT:
89901@@ -2725,7 +2725,7 @@ static void rcu_sysidle(unsigned long j)
89902 static void rcu_sysidle_cancel(void)
89903 {
89904 smp_mb();
89905- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
89906+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
89907 }
89908
89909 /*
89910@@ -2773,7 +2773,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
89911 smp_mb(); /* grace period precedes setting inuse. */
89912
89913 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
89914- ACCESS_ONCE(rshp->inuse) = 0;
89915+ ACCESS_ONCE_RW(rshp->inuse) = 0;
89916 }
89917
89918 /*
89919diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
89920index 4def475..8ffddde 100644
89921--- a/kernel/rcu/tree_trace.c
89922+++ b/kernel/rcu/tree_trace.c
89923@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
89924 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
89925 rdp->passed_quiesce, rdp->qs_pending);
89926 seq_printf(m, " dt=%d/%llx/%d df=%lu",
89927- atomic_read(&rdp->dynticks->dynticks),
89928+ atomic_read_unchecked(&rdp->dynticks->dynticks),
89929 rdp->dynticks->dynticks_nesting,
89930 rdp->dynticks->dynticks_nmi_nesting,
89931 rdp->dynticks_fqs);
89932@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
89933 struct rcu_state *rsp = (struct rcu_state *)m->private;
89934
89935 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
89936- atomic_long_read(&rsp->expedited_start),
89937+ atomic_long_read_unchecked(&rsp->expedited_start),
89938 atomic_long_read(&rsp->expedited_done),
89939- atomic_long_read(&rsp->expedited_wrap),
89940- atomic_long_read(&rsp->expedited_tryfail),
89941- atomic_long_read(&rsp->expedited_workdone1),
89942- atomic_long_read(&rsp->expedited_workdone2),
89943- atomic_long_read(&rsp->expedited_normal),
89944- atomic_long_read(&rsp->expedited_stoppedcpus),
89945- atomic_long_read(&rsp->expedited_done_tries),
89946- atomic_long_read(&rsp->expedited_done_lost),
89947- atomic_long_read(&rsp->expedited_done_exit));
89948+ atomic_long_read_unchecked(&rsp->expedited_wrap),
89949+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
89950+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
89951+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
89952+ atomic_long_read_unchecked(&rsp->expedited_normal),
89953+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
89954+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
89955+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
89956+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
89957 return 0;
89958 }
89959
89960diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
89961index c54609f..2e8829c 100644
89962--- a/kernel/rcu/update.c
89963+++ b/kernel/rcu/update.c
89964@@ -312,10 +312,10 @@ int rcu_jiffies_till_stall_check(void)
89965 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
89966 */
89967 if (till_stall_check < 3) {
89968- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
89969+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
89970 till_stall_check = 3;
89971 } else if (till_stall_check > 300) {
89972- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
89973+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
89974 till_stall_check = 300;
89975 }
89976 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
89977diff --git a/kernel/resource.c b/kernel/resource.c
89978index 3f285dc..5755f62 100644
89979--- a/kernel/resource.c
89980+++ b/kernel/resource.c
89981@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
89982
89983 static int __init ioresources_init(void)
89984 {
89985+#ifdef CONFIG_GRKERNSEC_PROC_ADD
89986+#ifdef CONFIG_GRKERNSEC_PROC_USER
89987+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
89988+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
89989+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89990+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
89991+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
89992+#endif
89993+#else
89994 proc_create("ioports", 0, NULL, &proc_ioports_operations);
89995 proc_create("iomem", 0, NULL, &proc_iomem_operations);
89996+#endif
89997 return 0;
89998 }
89999 __initcall(ioresources_init);
90000diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
90001index 4a07353..66b5291 100644
90002--- a/kernel/sched/auto_group.c
90003+++ b/kernel/sched/auto_group.c
90004@@ -11,7 +11,7 @@
90005
90006 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
90007 static struct autogroup autogroup_default;
90008-static atomic_t autogroup_seq_nr;
90009+static atomic_unchecked_t autogroup_seq_nr;
90010
90011 void __init autogroup_init(struct task_struct *init_task)
90012 {
90013@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
90014
90015 kref_init(&ag->kref);
90016 init_rwsem(&ag->lock);
90017- ag->id = atomic_inc_return(&autogroup_seq_nr);
90018+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
90019 ag->tg = tg;
90020 #ifdef CONFIG_RT_GROUP_SCHED
90021 /*
90022diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
90023index a63f4dc..349bbb0 100644
90024--- a/kernel/sched/completion.c
90025+++ b/kernel/sched/completion.c
90026@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
90027 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
90028 * or number of jiffies left till timeout) if completed.
90029 */
90030-long __sched
90031+long __sched __intentional_overflow(-1)
90032 wait_for_completion_interruptible_timeout(struct completion *x,
90033 unsigned long timeout)
90034 {
90035@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
90036 *
90037 * Return: -ERESTARTSYS if interrupted, 0 if completed.
90038 */
90039-int __sched wait_for_completion_killable(struct completion *x)
90040+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
90041 {
90042 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
90043 if (t == -ERESTARTSYS)
90044@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
90045 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
90046 * or number of jiffies left till timeout) if completed.
90047 */
90048-long __sched
90049+long __sched __intentional_overflow(-1)
90050 wait_for_completion_killable_timeout(struct completion *x,
90051 unsigned long timeout)
90052 {
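
The __intentional_overflow(-1) annotations exempt these helpers from
the size_overflow GCC plugin: their long return value deliberately
mixes -ERESTARTSYS with a remaining-jiffies count, which the plugin
would otherwise flag. The annotation's assumed shape, following the
compiler-header convention used elsewhere in this patch (the exact
macro guard may differ):

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif
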
90053diff --git a/kernel/sched/core.c b/kernel/sched/core.c
90054index f5c6635..7133356 100644
90055--- a/kernel/sched/core.c
90056+++ b/kernel/sched/core.c
90057@@ -1775,7 +1775,7 @@ void set_numabalancing_state(bool enabled)
90058 int sysctl_numa_balancing(struct ctl_table *table, int write,
90059 void __user *buffer, size_t *lenp, loff_t *ppos)
90060 {
90061- struct ctl_table t;
90062+ ctl_table_no_const t;
90063 int err;
90064 int state = numabalancing_enabled;
90065
90066@@ -2251,8 +2251,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
90067 next->active_mm = oldmm;
90068 atomic_inc(&oldmm->mm_count);
90069 enter_lazy_tlb(oldmm, next);
90070- } else
90071+ } else {
90072 switch_mm(oldmm, mm, next);
90073+ populate_stack();
90074+ }
90075
90076 if (!prev->mm) {
90077 prev->active_mm = NULL;
90078@@ -3049,6 +3051,8 @@ int can_nice(const struct task_struct *p, const int nice)
90079 /* convert nice value [19,-20] to rlimit style value [1,40] */
90080 int nice_rlim = 20 - nice;
90081
90082+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
90083+
90084 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
90085 capable(CAP_SYS_NICE));
90086 }
90087@@ -3082,7 +3086,8 @@ SYSCALL_DEFINE1(nice, int, increment)
90088 if (nice > 19)
90089 nice = 19;
90090
90091- if (increment < 0 && !can_nice(current, nice))
90092+ if (increment < 0 && (!can_nice(current, nice) ||
90093+ gr_handle_chroot_nice()))
90094 return -EPERM;
90095
90096 retval = security_task_setnice(current, nice);
90097@@ -3332,6 +3337,7 @@ recheck:
90098 if (policy != p->policy && !rlim_rtprio)
90099 return -EPERM;
90100
90101+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
90102 /* can't increase priority */
90103 if (attr->sched_priority > p->rt_priority &&
90104 attr->sched_priority > rlim_rtprio)
90105@@ -4702,8 +4708,10 @@ void idle_task_exit(void)
90106
90107 BUG_ON(cpu_online(smp_processor_id()));
90108
90109- if (mm != &init_mm)
90110+ if (mm != &init_mm) {
90111 switch_mm(mm, &init_mm, current);
90112+ populate_stack();
90113+ }
90114 mmdrop(mm);
90115 }
90116
90117@@ -4781,7 +4789,7 @@ static void migrate_tasks(unsigned int dead_cpu)
90118
90119 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
90120
90121-static struct ctl_table sd_ctl_dir[] = {
90122+static ctl_table_no_const sd_ctl_dir[] __read_only = {
90123 {
90124 .procname = "sched_domain",
90125 .mode = 0555,
90126@@ -4798,17 +4806,17 @@ static struct ctl_table sd_ctl_root[] = {
90127 {}
90128 };
90129
90130-static struct ctl_table *sd_alloc_ctl_entry(int n)
90131+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
90132 {
90133- struct ctl_table *entry =
90134+ ctl_table_no_const *entry =
90135 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
90136
90137 return entry;
90138 }
90139
90140-static void sd_free_ctl_entry(struct ctl_table **tablep)
90141+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
90142 {
90143- struct ctl_table *entry;
90144+ ctl_table_no_const *entry;
90145
90146 /*
90147 * In the intermediate directories, both the child directory and
90148@@ -4816,22 +4824,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
90149 * will always be set. In the lowest directory the names are
90150 * static strings and all have proc handlers.
90151 */
90152- for (entry = *tablep; entry->mode; entry++) {
90153- if (entry->child)
90154- sd_free_ctl_entry(&entry->child);
90155+ for (entry = tablep; entry->mode; entry++) {
90156+ if (entry->child) {
90157+ sd_free_ctl_entry(entry->child);
90158+ pax_open_kernel();
90159+ entry->child = NULL;
90160+ pax_close_kernel();
90161+ }
90162 if (entry->proc_handler == NULL)
90163 kfree(entry->procname);
90164 }
90165
90166- kfree(*tablep);
90167- *tablep = NULL;
90168+ kfree(tablep);
90169 }
90170
90171 static int min_load_idx = 0;
90172 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
90173
90174 static void
90175-set_table_entry(struct ctl_table *entry,
90176+set_table_entry(ctl_table_no_const *entry,
90177 const char *procname, void *data, int maxlen,
90178 umode_t mode, proc_handler *proc_handler,
90179 bool load_idx)
90180@@ -4851,7 +4862,7 @@ set_table_entry(struct ctl_table *entry,
90181 static struct ctl_table *
90182 sd_alloc_ctl_domain_table(struct sched_domain *sd)
90183 {
90184- struct ctl_table *table = sd_alloc_ctl_entry(13);
90185+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
90186
90187 if (table == NULL)
90188 return NULL;
90189@@ -4886,9 +4897,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
90190 return table;
90191 }
90192
90193-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
90194+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
90195 {
90196- struct ctl_table *entry, *table;
90197+ ctl_table_no_const *entry, *table;
90198 struct sched_domain *sd;
90199 int domain_num = 0, i;
90200 char buf[32];
90201@@ -4915,11 +4926,13 @@ static struct ctl_table_header *sd_sysctl_header;
90202 static void register_sched_domain_sysctl(void)
90203 {
90204 int i, cpu_num = num_possible_cpus();
90205- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
90206+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
90207 char buf[32];
90208
90209 WARN_ON(sd_ctl_dir[0].child);
90210+ pax_open_kernel();
90211 sd_ctl_dir[0].child = entry;
90212+ pax_close_kernel();
90213
90214 if (entry == NULL)
90215 return;
90216@@ -4942,8 +4955,12 @@ static void unregister_sched_domain_sysctl(void)
90217 if (sd_sysctl_header)
90218 unregister_sysctl_table(sd_sysctl_header);
90219 sd_sysctl_header = NULL;
90220- if (sd_ctl_dir[0].child)
90221- sd_free_ctl_entry(&sd_ctl_dir[0].child);
90222+ if (sd_ctl_dir[0].child) {
90223+ sd_free_ctl_entry(sd_ctl_dir[0].child);
90224+ pax_open_kernel();
90225+ sd_ctl_dir[0].child = NULL;
90226+ pax_close_kernel();
90227+ }
90228 }
90229 #else
90230 static void register_sched_domain_sysctl(void)
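
The core.c hunks above convert the scheduler's sysctl tables to ctl_table_no_const and bracket the remaining writes (sd_ctl_dir[0].child, entry->child) with pax_open_kernel()/pax_close_kernel(): the tables live on read-only pages and are writable only inside a short, audited window. Below is a userspace analogue of that window using mprotect(2) -- a sketch only, with all names mine; the kernel implementation toggles page permissions by arch-specific means rather than mprotect.

    /* Read-only table with an explicit, temporary write window. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int *table;
    static size_t pagesz;

    static void open_window(void)  { mprotect(table, pagesz, PROT_READ | PROT_WRITE); }
    static void close_window(void) { mprotect(table, pagesz, PROT_READ); }

    int main(void)
    {
        pagesz = (size_t)sysconf(_SC_PAGESIZE);
        table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
            return 1;
        table[0] = 42;                      /* initialise while writable */
        mprotect(table, pagesz, PROT_READ); /* now effectively __read_only */

        open_window();                      /* like pax_open_kernel() */
        table[0] = 1337;                    /* the one audited write site */
        close_window();                     /* like pax_close_kernel() */

        printf("%d\n", table[0]);           /* a stray write elsewhere faults */
        return 0;
    }

The payoff is the same in both settings: anything that writes to the function-pointer-bearing table outside the sanctioned window faults instead of silently corrupting it.
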
90231diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
90232index 9b4c4f3..665489b 100644
90233--- a/kernel/sched/fair.c
90234+++ b/kernel/sched/fair.c
90235@@ -1647,7 +1647,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
90236
90237 static void reset_ptenuma_scan(struct task_struct *p)
90238 {
90239- ACCESS_ONCE(p->mm->numa_scan_seq)++;
90240+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
90241 p->mm->numa_scan_offset = 0;
90242 }
90243
90244@@ -6851,7 +6851,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
90245 * run_rebalance_domains is triggered when needed from the scheduler tick.
90246 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
90247 */
90248-static void run_rebalance_domains(struct softirq_action *h)
90249+static __latent_entropy void run_rebalance_domains(void)
90250 {
90251 struct rq *this_rq = this_rq();
90252 enum cpu_idle_type idle = this_rq->idle_balance ?
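
The reset_ptenuma_scan() hunk swaps ACCESS_ONCE for ACCESS_ONCE_RW because, in the patched tree, plain ACCESS_ONCE yields a const-qualified lvalue, so the increment would no longer compile. A compilable sketch of the distinction, assuming the simplified macro definitions below (the real ones live in this patch's include/linux/compiler.h changes):

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int numa_scan_seq = 0;
        /* ACCESS_ONCE(numa_scan_seq)++;  -- error: increment of read-only location */
        ACCESS_ONCE_RW(numa_scan_seq)++;    /* writable view: compiles */
        printf("%d\n", numa_scan_seq);
        return 0;
    }

Making the read-mostly macro const forces every legitimate write site to be converted explicitly, which is exactly what these hunks do.
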
90253diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
90254index f964add..dcd823d 100644
90255--- a/kernel/sched/sched.h
90256+++ b/kernel/sched/sched.h
90257@@ -1157,7 +1157,7 @@ struct sched_class {
90258 #ifdef CONFIG_FAIR_GROUP_SCHED
90259 void (*task_move_group) (struct task_struct *p, int on_rq);
90260 #endif
90261-};
90262+} __do_const;
90263
90264 #define sched_class_highest (&stop_sched_class)
90265 #define for_each_class(class) \
90266diff --git a/kernel/signal.c b/kernel/signal.c
90267index 52f881d..1e9f941 100644
90268--- a/kernel/signal.c
90269+++ b/kernel/signal.c
90270@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
90271
90272 int print_fatal_signals __read_mostly;
90273
90274-static void __user *sig_handler(struct task_struct *t, int sig)
90275+static __sighandler_t sig_handler(struct task_struct *t, int sig)
90276 {
90277 return t->sighand->action[sig - 1].sa.sa_handler;
90278 }
90279
90280-static int sig_handler_ignored(void __user *handler, int sig)
90281+static int sig_handler_ignored(__sighandler_t handler, int sig)
90282 {
90283 /* Is it explicitly or implicitly ignored? */
90284 return handler == SIG_IGN ||
90285@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
90286
90287 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
90288 {
90289- void __user *handler;
90290+ __sighandler_t handler;
90291
90292 handler = sig_handler(t, sig);
90293
90294@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
90295 atomic_inc(&user->sigpending);
90296 rcu_read_unlock();
90297
90298+ if (!override_rlimit)
90299+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
90300+
90301 if (override_rlimit ||
90302 atomic_read(&user->sigpending) <=
90303 task_rlimit(t, RLIMIT_SIGPENDING)) {
90304@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
90305
90306 int unhandled_signal(struct task_struct *tsk, int sig)
90307 {
90308- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
90309+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
90310 if (is_global_init(tsk))
90311 return 1;
90312 if (handler != SIG_IGN && handler != SIG_DFL)
90313@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
90314 }
90315 }
90316
90317+ /* allow glibc communication via tgkill to other threads in our
90318+ thread group */
90319+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
90320+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
90321+ && gr_handle_signal(t, sig))
90322+ return -EPERM;
90323+
90324 return security_task_kill(t, info, sig, 0);
90325 }
90326
90327@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
90328 return send_signal(sig, info, p, 1);
90329 }
90330
90331-static int
90332+int
90333 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
90334 {
90335 return send_signal(sig, info, t, 0);
90336@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
90337 unsigned long int flags;
90338 int ret, blocked, ignored;
90339 struct k_sigaction *action;
90340+ int is_unhandled = 0;
90341
90342 spin_lock_irqsave(&t->sighand->siglock, flags);
90343 action = &t->sighand->action[sig-1];
90344@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
90345 }
90346 if (action->sa.sa_handler == SIG_DFL)
90347 t->signal->flags &= ~SIGNAL_UNKILLABLE;
90348+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
90349+ is_unhandled = 1;
90350 ret = specific_send_sig_info(sig, info, t);
90351 spin_unlock_irqrestore(&t->sighand->siglock, flags);
90352
90353+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
90354+	   normal operation */
90355+ if (is_unhandled) {
90356+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
90357+ gr_handle_crash(t, sig);
90358+ }
90359+
90360 return ret;
90361 }
90362
90363@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
90364 ret = check_kill_permission(sig, info, p);
90365 rcu_read_unlock();
90366
90367- if (!ret && sig)
90368+ if (!ret && sig) {
90369 ret = do_send_sig_info(sig, info, p, true);
90370+ if (!ret)
90371+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
90372+ }
90373
90374 return ret;
90375 }
90376@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
90377 int error = -ESRCH;
90378
90379 rcu_read_lock();
90380- p = find_task_by_vpid(pid);
90381+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
90382+ /* allow glibc communication via tgkill to other threads in our
90383+ thread group */
90384+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
90385+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
90386+ p = find_task_by_vpid_unrestricted(pid);
90387+ else
90388+#endif
90389+ p = find_task_by_vpid(pid);
90390 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
90391 error = check_kill_permission(sig, info, p);
90392 /*
90393@@ -3239,8 +3270,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
90394 }
90395 seg = get_fs();
90396 set_fs(KERNEL_DS);
90397- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
90398- (stack_t __force __user *) &uoss,
90399+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
90400+ (stack_t __force_user *) &uoss,
90401 compat_user_stack_pointer());
90402 set_fs(seg);
90403 if (ret >= 0 && uoss_ptr) {
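
Both signal.c carve-outs above (in check_kill_permission() and do_send_specific()) whitelist one narrow message: glibc's internal setxid signal, which nptl broadcasts with tgkill() so that setuid()/setgid() take effect in every thread of a process. A hedged restatement of the predicate -- the function name and framing are mine, only the three conditions come from the hunks. In kernel terms SIGRTMIN is 32, so SIGRTMIN+1 is 33, glibc's SIGSETXID (userspace SIGRTMIN reads as 34 because glibc hides its two reserved signals):

    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>

    #define KERNEL_SIGRTMIN 32

    static bool is_glibc_setxid_traffic(int sig, int si_code,
                                        pid_t sender_tgid, pid_t target_tgid)
    {
        return si_code == SI_TKILL &&         /* arrived via tgkill()      */
               sig == KERNEL_SIGRTMIN + 1 &&  /* SIGSETXID                 */
               sender_tgid == target_tgid;    /* stays in the thread group */
    }

    int main(void)
    {
        printf("%d\n", is_glibc_setxid_traffic(33, SI_TKILL, 100, 100));
        return 0;
    }

Without the exception, gr_handle_signal() and the chroot findtask restriction could make ordinary setuid() calls fail in threaded programs.
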
90404diff --git a/kernel/smpboot.c b/kernel/smpboot.c
90405index eb89e18..a4e6792 100644
90406--- a/kernel/smpboot.c
90407+++ b/kernel/smpboot.c
90408@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
90409 }
90410 smpboot_unpark_thread(plug_thread, cpu);
90411 }
90412- list_add(&plug_thread->list, &hotplug_threads);
90413+ pax_list_add(&plug_thread->list, &hotplug_threads);
90414 out:
90415 mutex_unlock(&smpboot_threads_lock);
90416 return ret;
90417@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
90418 {
90419 get_online_cpus();
90420 mutex_lock(&smpboot_threads_lock);
90421- list_del(&plug_thread->list);
90422+ pax_list_del(&plug_thread->list);
90423 smpboot_destroy_threads(plug_thread);
90424 mutex_unlock(&smpboot_threads_lock);
90425 put_online_cpus();
90426diff --git a/kernel/softirq.c b/kernel/softirq.c
90427index 490fcbb..1e502c6 100644
90428--- a/kernel/softirq.c
90429+++ b/kernel/softirq.c
90430@@ -52,7 +52,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
90431 EXPORT_SYMBOL(irq_stat);
90432 #endif
90433
90434-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
90435+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
90436
90437 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
90438
90439@@ -267,7 +267,7 @@ restart:
90440 kstat_incr_softirqs_this_cpu(vec_nr);
90441
90442 trace_softirq_entry(vec_nr);
90443- h->action(h);
90444+ h->action();
90445 trace_softirq_exit(vec_nr);
90446 if (unlikely(prev_count != preempt_count())) {
90447 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
90448@@ -427,7 +427,7 @@ void __raise_softirq_irqoff(unsigned int nr)
90449 or_softirq_pending(1UL << nr);
90450 }
90451
90452-void open_softirq(int nr, void (*action)(struct softirq_action *))
90453+void __init open_softirq(int nr, void (*action)(void))
90454 {
90455 softirq_vec[nr].action = action;
90456 }
90457@@ -479,7 +479,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
90458 }
90459 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
90460
90461-static void tasklet_action(struct softirq_action *a)
90462+static void tasklet_action(void)
90463 {
90464 struct tasklet_struct *list;
90465
90466@@ -515,7 +515,7 @@ static void tasklet_action(struct softirq_action *a)
90467 }
90468 }
90469
90470-static void tasklet_hi_action(struct softirq_action *a)
90471+static __latent_entropy void tasklet_hi_action(void)
90472 {
90473 struct tasklet_struct *list;
90474
90475@@ -742,7 +742,7 @@ static struct notifier_block cpu_nfb = {
90476 .notifier_call = cpu_callback
90477 };
90478
90479-static struct smp_hotplug_thread softirq_threads = {
90480+static struct smp_hotplug_thread softirq_threads __read_only = {
90481 .store = &ksoftirqd,
90482 .thread_should_run = ksoftirqd_should_run,
90483 .thread_fn = run_ksoftirqd,
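
The softirq.c changes are one theme: the dispatch table becomes write-once. Handlers drop their never-used struct softirq_action * argument, open_softirq() becomes __init-only, and softirq_vec moves into read-only, page-aligned storage. A userspace model of the resulting shape (enum values and names are simplifications of mine; plain const stands in for the patch's __read_only placement):

    #include <stdio.h>

    enum { TIMER_SOFTIRQ, TASKLET_SOFTIRQ, NR_SOFTIRQS };

    struct softirq_action { void (*action)(void); };

    static void run_timer_softirq(void) { puts("timers"); }
    static void tasklet_action(void)    { puts("tasklets"); }

    /* fixed at build time, never reassigned afterwards */
    static const struct softirq_action softirq_vec[NR_SOFTIRQS] = {
        [TIMER_SOFTIRQ]   = { run_timer_softirq },
        [TASKLET_SOFTIRQ] = { tasklet_action },
    };

    int main(void)
    {
        for (int nr = 0; nr < NR_SOFTIRQS; nr++)
            softirq_vec[nr].action();   /* was h->action(h) before the patch */
        return 0;
    }
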
90484diff --git a/kernel/sys.c b/kernel/sys.c
90485index c0a58be..784c618 100644
90486--- a/kernel/sys.c
90487+++ b/kernel/sys.c
90488@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
90489 error = -EACCES;
90490 goto out;
90491 }
90492+
90493+ if (gr_handle_chroot_setpriority(p, niceval)) {
90494+ error = -EACCES;
90495+ goto out;
90496+ }
90497+
90498 no_nice = security_task_setnice(p, niceval);
90499 if (no_nice) {
90500 error = no_nice;
90501@@ -351,6 +357,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
90502 goto error;
90503 }
90504
90505+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
90506+ goto error;
90507+
90508 if (rgid != (gid_t) -1 ||
90509 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
90510 new->sgid = new->egid;
90511@@ -386,6 +395,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
90512 old = current_cred();
90513
90514 retval = -EPERM;
90515+
90516+ if (gr_check_group_change(kgid, kgid, kgid))
90517+ goto error;
90518+
90519 if (ns_capable(old->user_ns, CAP_SETGID))
90520 new->gid = new->egid = new->sgid = new->fsgid = kgid;
90521 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
90522@@ -403,7 +416,7 @@ error:
90523 /*
90524 * change the user struct in a credentials set to match the new UID
90525 */
90526-static int set_user(struct cred *new)
90527+int set_user(struct cred *new)
90528 {
90529 struct user_struct *new_user;
90530
90531@@ -483,6 +496,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
90532 goto error;
90533 }
90534
90535+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
90536+ goto error;
90537+
90538 if (!uid_eq(new->uid, old->uid)) {
90539 retval = set_user(new);
90540 if (retval < 0)
90541@@ -533,6 +549,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
90542 old = current_cred();
90543
90544 retval = -EPERM;
90545+
90546+ if (gr_check_crash_uid(kuid))
90547+ goto error;
90548+ if (gr_check_user_change(kuid, kuid, kuid))
90549+ goto error;
90550+
90551 if (ns_capable(old->user_ns, CAP_SETUID)) {
90552 new->suid = new->uid = kuid;
90553 if (!uid_eq(kuid, old->uid)) {
90554@@ -602,6 +624,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
90555 goto error;
90556 }
90557
90558+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
90559+ goto error;
90560+
90561 if (ruid != (uid_t) -1) {
90562 new->uid = kruid;
90563 if (!uid_eq(kruid, old->uid)) {
90564@@ -684,6 +709,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
90565 goto error;
90566 }
90567
90568+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
90569+ goto error;
90570+
90571 if (rgid != (gid_t) -1)
90572 new->gid = krgid;
90573 if (egid != (gid_t) -1)
90574@@ -745,12 +773,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
90575 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
90576 ns_capable(old->user_ns, CAP_SETUID)) {
90577 if (!uid_eq(kuid, old->fsuid)) {
90578+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
90579+ goto error;
90580+
90581 new->fsuid = kuid;
90582 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
90583 goto change_okay;
90584 }
90585 }
90586
90587+error:
90588 abort_creds(new);
90589 return old_fsuid;
90590
90591@@ -783,12 +815,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
90592 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
90593 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
90594 ns_capable(old->user_ns, CAP_SETGID)) {
90595+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
90596+ goto error;
90597+
90598 if (!gid_eq(kgid, old->fsgid)) {
90599 new->fsgid = kgid;
90600 goto change_okay;
90601 }
90602 }
90603
90604+error:
90605 abort_creds(new);
90606 return old_fsgid;
90607
90608@@ -1167,19 +1203,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
90609 return -EFAULT;
90610
90611 down_read(&uts_sem);
90612- error = __copy_to_user(&name->sysname, &utsname()->sysname,
90613+ error = __copy_to_user(name->sysname, &utsname()->sysname,
90614 __OLD_UTS_LEN);
90615 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
90616- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
90617+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
90618 __OLD_UTS_LEN);
90619 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
90620- error |= __copy_to_user(&name->release, &utsname()->release,
90621+ error |= __copy_to_user(name->release, &utsname()->release,
90622 __OLD_UTS_LEN);
90623 error |= __put_user(0, name->release + __OLD_UTS_LEN);
90624- error |= __copy_to_user(&name->version, &utsname()->version,
90625+ error |= __copy_to_user(name->version, &utsname()->version,
90626 __OLD_UTS_LEN);
90627 error |= __put_user(0, name->version + __OLD_UTS_LEN);
90628- error |= __copy_to_user(&name->machine, &utsname()->machine,
90629+ error |= __copy_to_user(name->machine, &utsname()->machine,
90630 __OLD_UTS_LEN);
90631 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
90632 up_read(&uts_sem);
90633@@ -1381,6 +1417,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
90634 */
90635 new_rlim->rlim_cur = 1;
90636 }
90637+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
90638+ is changed to a lower value. Since tasks can be created by the same
90639+ user in between this limit change and an execve by this task, force
90640+	   a recheck only for this task by setting PF_NPROC_EXCEEDED.
90641+ */
90642+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
90643+ tsk->flags |= PF_NPROC_EXCEEDED;
90644 }
90645 if (!retval) {
90646 if (old_rlim)
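
The do_prlimit() hunk arms PF_NPROC_EXCEEDED whenever RLIMIT_NPROC is lowered for a non-INIT_USER task, so the limit gets re-checked at that task's next execve() instead of only at fork()/set_user() time. A userspace model of the handshake (the flag value matches linux/sched.h; the structures and -EAGAIN plumbing are simplifications of mine):

    #include <stdio.h>

    #define PF_NPROC_EXCEEDED 0x00001000   /* value as in linux/sched.h */

    struct task { unsigned flags; unsigned nproc; unsigned rlimit_nproc; };

    static void lower_nproc_limit(struct task *t, unsigned new_limit)
    {
        t->rlimit_nproc = new_limit;
        t->flags |= PF_NPROC_EXCEEDED;     /* what the sys.c hunk adds */
    }

    static int do_execve_check(struct task *t)
    {
        if ((t->flags & PF_NPROC_EXCEEDED) && t->nproc > t->rlimit_nproc)
            return -11;                    /* -EAGAIN */
        t->flags &= ~PF_NPROC_EXCEEDED;    /* one-shot: don't keep failing */
        return 0;
    }

    int main(void)
    {
        struct task t = { .flags = 0, .nproc = 8, .rlimit_nproc = 16 };
        lower_nproc_limit(&t, 4);
        printf("execve -> %d\n", do_execve_check(&t));  /* -11: over the new limit */
        return 0;
    }
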
90647diff --git a/kernel/sysctl.c b/kernel/sysctl.c
90648index aae21e8..58d8c9a 100644
90649--- a/kernel/sysctl.c
90650+++ b/kernel/sysctl.c
90651@@ -94,7 +94,6 @@
90652
90653
90654 #if defined(CONFIG_SYSCTL)
90655-
90656 /* External variables not in a header file. */
90657 extern int max_threads;
90658 extern int suid_dumpable;
90659@@ -118,19 +117,18 @@ extern int blk_iopoll_enabled;
90660
90661 /* Constants used for minimum and maximum */
90662 #ifdef CONFIG_LOCKUP_DETECTOR
90663-static int sixty = 60;
90664+static int sixty __read_only = 60;
90665 #endif
90666
90667-static int __maybe_unused neg_one = -1;
90668-
90669-static int zero;
90670-static int __maybe_unused one = 1;
90671-static int __maybe_unused two = 2;
90672-static int __maybe_unused three = 3;
90673-static unsigned long one_ul = 1;
90674-static int one_hundred = 100;
90675+static int __maybe_unused neg_one __read_only = -1;
90676+static int zero __read_only = 0;
90677+static int __maybe_unused one __read_only = 1;
90678+static int __maybe_unused two __read_only = 2;
90679+static int __maybe_unused three __read_only = 3;
90680+static unsigned long one_ul __read_only = 1;
90681+static int one_hundred __read_only = 100;
90682 #ifdef CONFIG_PRINTK
90683-static int ten_thousand = 10000;
90684+static int ten_thousand __read_only = 10000;
90685 #endif
90686
90687 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
90688@@ -182,10 +180,8 @@ static int proc_taint(struct ctl_table *table, int write,
90689 void __user *buffer, size_t *lenp, loff_t *ppos);
90690 #endif
90691
90692-#ifdef CONFIG_PRINTK
90693 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
90694 void __user *buffer, size_t *lenp, loff_t *ppos);
90695-#endif
90696
90697 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
90698 void __user *buffer, size_t *lenp, loff_t *ppos);
90699@@ -216,6 +212,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
90700
90701 #endif
90702
90703+extern struct ctl_table grsecurity_table[];
90704+
90705 static struct ctl_table kern_table[];
90706 static struct ctl_table vm_table[];
90707 static struct ctl_table fs_table[];
90708@@ -230,6 +228,20 @@ extern struct ctl_table epoll_table[];
90709 int sysctl_legacy_va_layout;
90710 #endif
90711
90712+#ifdef CONFIG_PAX_SOFTMODE
90713+static ctl_table pax_table[] = {
90714+ {
90715+ .procname = "softmode",
90716+ .data = &pax_softmode,
90717+ .maxlen = sizeof(unsigned int),
90718+ .mode = 0600,
90719+ .proc_handler = &proc_dointvec,
90720+ },
90721+
90722+ { }
90723+};
90724+#endif
90725+
90726 /* The default sysctl tables: */
90727
90728 static struct ctl_table sysctl_base_table[] = {
90729@@ -278,6 +290,22 @@ static int max_extfrag_threshold = 1000;
90730 #endif
90731
90732 static struct ctl_table kern_table[] = {
90733+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
90734+ {
90735+ .procname = "grsecurity",
90736+ .mode = 0500,
90737+ .child = grsecurity_table,
90738+ },
90739+#endif
90740+
90741+#ifdef CONFIG_PAX_SOFTMODE
90742+ {
90743+ .procname = "pax",
90744+ .mode = 0500,
90745+ .child = pax_table,
90746+ },
90747+#endif
90748+
90749 {
90750 .procname = "sched_child_runs_first",
90751 .data = &sysctl_sched_child_runs_first,
90752@@ -640,7 +668,7 @@ static struct ctl_table kern_table[] = {
90753 .data = &modprobe_path,
90754 .maxlen = KMOD_PATH_LEN,
90755 .mode = 0644,
90756- .proc_handler = proc_dostring,
90757+ .proc_handler = proc_dostring_modpriv,
90758 },
90759 {
90760 .procname = "modules_disabled",
90761@@ -807,16 +835,20 @@ static struct ctl_table kern_table[] = {
90762 .extra1 = &zero,
90763 .extra2 = &one,
90764 },
90765+#endif
90766 {
90767 .procname = "kptr_restrict",
90768 .data = &kptr_restrict,
90769 .maxlen = sizeof(int),
90770 .mode = 0644,
90771 .proc_handler = proc_dointvec_minmax_sysadmin,
90772+#ifdef CONFIG_GRKERNSEC_HIDESYM
90773+ .extra1 = &two,
90774+#else
90775 .extra1 = &zero,
90776+#endif
90777 .extra2 = &two,
90778 },
90779-#endif
90780 {
90781 .procname = "ngroups_max",
90782 .data = &ngroups_max,
90783@@ -1061,10 +1093,17 @@ static struct ctl_table kern_table[] = {
90784 */
90785 {
90786 .procname = "perf_event_paranoid",
90787- .data = &sysctl_perf_event_paranoid,
90788- .maxlen = sizeof(sysctl_perf_event_paranoid),
90789+ .data = &sysctl_perf_event_legitimately_concerned,
90790+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
90791 .mode = 0644,
90792- .proc_handler = proc_dointvec,
90793+ /* go ahead, be a hero */
90794+ .proc_handler = proc_dointvec_minmax_sysadmin,
90795+ .extra1 = &neg_one,
90796+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
90797+ .extra2 = &three,
90798+#else
90799+ .extra2 = &two,
90800+#endif
90801 },
90802 {
90803 .procname = "perf_event_mlock_kb",
90804@@ -1335,6 +1374,13 @@ static struct ctl_table vm_table[] = {
90805 .proc_handler = proc_dointvec_minmax,
90806 .extra1 = &zero,
90807 },
90808+ {
90809+ .procname = "heap_stack_gap",
90810+ .data = &sysctl_heap_stack_gap,
90811+ .maxlen = sizeof(sysctl_heap_stack_gap),
90812+ .mode = 0644,
90813+ .proc_handler = proc_doulongvec_minmax,
90814+ },
90815 #else
90816 {
90817 .procname = "nr_trim_pages",
90818@@ -1799,6 +1845,16 @@ int proc_dostring(struct ctl_table *table, int write,
90819 buffer, lenp, ppos);
90820 }
90821
90822+int proc_dostring_modpriv(struct ctl_table *table, int write,
90823+ void __user *buffer, size_t *lenp, loff_t *ppos)
90824+{
90825+ if (write && !capable(CAP_SYS_MODULE))
90826+ return -EPERM;
90827+
90828+ return _proc_do_string(table->data, table->maxlen, write,
90829+ buffer, lenp, ppos);
90830+}
90831+
90832 static size_t proc_skip_spaces(char **buf)
90833 {
90834 size_t ret;
90835@@ -1904,6 +1960,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
90836 len = strlen(tmp);
90837 if (len > *size)
90838 len = *size;
90839+ if (len > sizeof(tmp))
90840+ len = sizeof(tmp);
90841 if (copy_to_user(*buf, tmp, len))
90842 return -EFAULT;
90843 *size -= len;
90844@@ -2068,7 +2126,7 @@ int proc_dointvec(struct ctl_table *table, int write,
90845 static int proc_taint(struct ctl_table *table, int write,
90846 void __user *buffer, size_t *lenp, loff_t *ppos)
90847 {
90848- struct ctl_table t;
90849+ ctl_table_no_const t;
90850 unsigned long tmptaint = get_taint();
90851 int err;
90852
90853@@ -2096,7 +2154,6 @@ static int proc_taint(struct ctl_table *table, int write,
90854 return err;
90855 }
90856
90857-#ifdef CONFIG_PRINTK
90858 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
90859 void __user *buffer, size_t *lenp, loff_t *ppos)
90860 {
90861@@ -2105,7 +2162,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
90862
90863 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
90864 }
90865-#endif
90866
90867 struct do_proc_dointvec_minmax_conv_param {
90868 int *min;
90869@@ -2652,6 +2708,12 @@ int proc_dostring(struct ctl_table *table, int write,
90870 return -ENOSYS;
90871 }
90872
90873+int proc_dostring_modpriv(struct ctl_table *table, int write,
90874+ void __user *buffer, size_t *lenp, loff_t *ppos)
90875+{
90876+ return -ENOSYS;
90877+}
90878+
90879 int proc_dointvec(struct ctl_table *table, int write,
90880 void __user *buffer, size_t *lenp, loff_t *ppos)
90881 {
90882@@ -2708,5 +2770,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
90883 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
90884 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
90885 EXPORT_SYMBOL(proc_dostring);
90886+EXPORT_SYMBOL(proc_dostring_modpriv);
90887 EXPORT_SYMBOL(proc_doulongvec_minmax);
90888 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
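
Two patterns recur in the sysctl.c hunks: write-gated handlers (proc_dostring_modpriv demands CAP_SYS_MODULE before modprobe_path may change) and hard floors enforced through .extra1/.extra2 (under GRKERNSEC_HIDESYM, kptr_restrict is pinned to the range [2, 2], so it can be rewritten but never relaxed). A userspace model of the bounds mechanism, i.e. what a proc_dointvec_minmax-style handler does with those pointers (names mine, logic simplified):

    #include <stdio.h>

    static int minmax_write(int *data, int val, const int *min, const int *max)
    {
        if ((min && val < *min) || (max && val > *max))
            return -22;                 /* -EINVAL, as the kernel handler returns */
        *data = val;
        return 0;
    }

    int main(void)
    {
        int kptr_restrict = 2;
        const int two = 2;              /* both bounds under HIDESYM */
        printf("write 0 -> %d\n", minmax_write(&kptr_restrict, 0, &two, &two));
        printf("write 2 -> %d\n", minmax_write(&kptr_restrict, 2, &two, &two));
        return 0;
    }
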
90889diff --git a/kernel/taskstats.c b/kernel/taskstats.c
90890index 13d2f7c..c93d0b0 100644
90891--- a/kernel/taskstats.c
90892+++ b/kernel/taskstats.c
90893@@ -28,9 +28,12 @@
90894 #include <linux/fs.h>
90895 #include <linux/file.h>
90896 #include <linux/pid_namespace.h>
90897+#include <linux/grsecurity.h>
90898 #include <net/genetlink.h>
90899 #include <linux/atomic.h>
90900
90901+extern int gr_is_taskstats_denied(int pid);
90902+
90903 /*
90904 * Maximum length of a cpumask that can be specified in
90905 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
90906@@ -576,6 +579,9 @@ err:
90907
90908 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
90909 {
90910+ if (gr_is_taskstats_denied(current->pid))
90911+ return -EACCES;
90912+
90913 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
90914 return cmd_attr_register_cpumask(info);
90915 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
90916diff --git a/kernel/time.c b/kernel/time.c
90917index 7c7964c..2a0d412 100644
90918--- a/kernel/time.c
90919+++ b/kernel/time.c
90920@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
90921 return error;
90922
90923 if (tz) {
90924+	/* we log in do_settimeofday(), called below, so don't log twice
90925+	 */
90926+ if (!tv)
90927+ gr_log_timechange();
90928+
90929 sys_tz = *tz;
90930 update_vsyscall_tz();
90931 if (firsttime) {
90932diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
90933index 88c9c65..7497ebc 100644
90934--- a/kernel/time/alarmtimer.c
90935+++ b/kernel/time/alarmtimer.c
90936@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
90937 struct platform_device *pdev;
90938 int error = 0;
90939 int i;
90940- struct k_clock alarm_clock = {
90941+ static struct k_clock alarm_clock = {
90942 .clock_getres = alarm_clock_getres,
90943 .clock_get = alarm_clock_get,
90944 .timer_create = alarm_timer_create,
90945diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
90946index 5b40279..81e58db 100644
90947--- a/kernel/time/timekeeping.c
90948+++ b/kernel/time/timekeeping.c
90949@@ -15,6 +15,7 @@
90950 #include <linux/init.h>
90951 #include <linux/mm.h>
90952 #include <linux/sched.h>
90953+#include <linux/grsecurity.h>
90954 #include <linux/syscore_ops.h>
90955 #include <linux/clocksource.h>
90956 #include <linux/jiffies.h>
90957@@ -501,6 +502,8 @@ int do_settimeofday(const struct timespec *tv)
90958 if (!timespec_valid_strict(tv))
90959 return -EINVAL;
90960
90961+ gr_log_timechange();
90962+
90963 raw_spin_lock_irqsave(&timekeeper_lock, flags);
90964 write_seqcount_begin(&timekeeper_seq);
90965
90966diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
90967index 61ed862..3b52c65 100644
90968--- a/kernel/time/timer_list.c
90969+++ b/kernel/time/timer_list.c
90970@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
90971
90972 static void print_name_offset(struct seq_file *m, void *sym)
90973 {
90974+#ifdef CONFIG_GRKERNSEC_HIDESYM
90975+ SEQ_printf(m, "<%p>", NULL);
90976+#else
90977 char symname[KSYM_NAME_LEN];
90978
90979 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
90980 SEQ_printf(m, "<%pK>", sym);
90981 else
90982 SEQ_printf(m, "%s", symname);
90983+#endif
90984 }
90985
90986 static void
90987@@ -119,7 +123,11 @@ next_one:
90988 static void
90989 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
90990 {
90991+#ifdef CONFIG_GRKERNSEC_HIDESYM
90992+ SEQ_printf(m, " .base: %p\n", NULL);
90993+#else
90994 SEQ_printf(m, " .base: %pK\n", base);
90995+#endif
90996 SEQ_printf(m, " .index: %d\n",
90997 base->index);
90998 SEQ_printf(m, " .resolution: %Lu nsecs\n",
90999@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
91000 {
91001 struct proc_dir_entry *pe;
91002
91003+#ifdef CONFIG_GRKERNSEC_PROC_ADD
91004+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
91005+#else
91006 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
91007+#endif
91008 if (!pe)
91009 return -ENOMEM;
91010 return 0;
91011diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
91012index 1fb08f2..ca4bb1e 100644
91013--- a/kernel/time/timer_stats.c
91014+++ b/kernel/time/timer_stats.c
91015@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
91016 static unsigned long nr_entries;
91017 static struct entry entries[MAX_ENTRIES];
91018
91019-static atomic_t overflow_count;
91020+static atomic_unchecked_t overflow_count;
91021
91022 /*
91023 * The entries are in a hash-table, for fast lookup:
91024@@ -140,7 +140,7 @@ static void reset_entries(void)
91025 nr_entries = 0;
91026 memset(entries, 0, sizeof(entries));
91027 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
91028- atomic_set(&overflow_count, 0);
91029+ atomic_set_unchecked(&overflow_count, 0);
91030 }
91031
91032 static struct entry *alloc_entry(void)
91033@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
91034 if (likely(entry))
91035 entry->count++;
91036 else
91037- atomic_inc(&overflow_count);
91038+ atomic_inc_unchecked(&overflow_count);
91039
91040 out_unlock:
91041 raw_spin_unlock_irqrestore(lock, flags);
91042@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
91043
91044 static void print_name_offset(struct seq_file *m, unsigned long addr)
91045 {
91046+#ifdef CONFIG_GRKERNSEC_HIDESYM
91047+ seq_printf(m, "<%p>", NULL);
91048+#else
91049 char symname[KSYM_NAME_LEN];
91050
91051 if (lookup_symbol_name(addr, symname) < 0)
91052- seq_printf(m, "<%p>", (void *)addr);
91053+ seq_printf(m, "<%pK>", (void *)addr);
91054 else
91055 seq_printf(m, "%s", symname);
91056+#endif
91057 }
91058
91059 static int tstats_show(struct seq_file *m, void *v)
91060@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
91061
91062 seq_puts(m, "Timer Stats Version: v0.3\n");
91063 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
91064- if (atomic_read(&overflow_count))
91065- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
91066+ if (atomic_read_unchecked(&overflow_count))
91067+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
91068 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
91069
91070 for (i = 0; i < nr_entries; i++) {
91071@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
91072 {
91073 struct proc_dir_entry *pe;
91074
91075+#ifdef CONFIG_GRKERNSEC_PROC_ADD
91076+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
91077+#else
91078 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
91079+#endif
91080 if (!pe)
91081 return -ENOMEM;
91082 return 0;
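
The atomic_t -> atomic_unchecked_t conversions here, and in blktrace.c, trace_clock.c and trace_mmiotrace.c below (plus the local_unchecked_t ones in ring_buffer.c), all follow from PaX REFCOUNT: ordinary atomic increments become overflow-checked and trap on wrap, so pure statistics counters that may legitimately wrap must move to the unchecked variant. A userspace model of the split -- simplified, since the kernel's checked path traps in asm, and the counters are unsigned here only so the demo's wraparound is well-defined C:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { volatile unsigned counter; } atomic_t;
    typedef struct { volatile unsigned counter; } atomic_unchecked_t;

    static void atomic_inc(atomic_t *v)
    {
        if (v->counter == UINT_MAX)
            abort();                    /* PaX: overflow -> trap, never wrap */
        v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        v->counter++;                   /* statistics may wrap harmlessly */
    }

    int main(void)
    {
        atomic_t refs = { 1 };
        atomic_unchecked_t overflow_count = { UINT_MAX };

        atomic_inc(&refs);                      /* fine: nowhere near the limit */
        atomic_inc_unchecked(&overflow_count);  /* wraps to 0, by design */
        printf("refs=%u overflow=%u\n", refs.counter, overflow_count.counter);
        return 0;
    }
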
91083diff --git a/kernel/timer.c b/kernel/timer.c
91084index 38f0d40..96b2ebf 100644
91085--- a/kernel/timer.c
91086+++ b/kernel/timer.c
91087@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
91088 /*
91089 * This function runs timers and the timer-tq in bottom half context.
91090 */
91091-static void run_timer_softirq(struct softirq_action *h)
91092+static __latent_entropy void run_timer_softirq(void)
91093 {
91094 struct tvec_base *base = __this_cpu_read(tvec_bases);
91095
91096@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
91097 *
91098 * In all cases the return value is guaranteed to be non-negative.
91099 */
91100-signed long __sched schedule_timeout(signed long timeout)
91101+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
91102 {
91103 struct timer_list timer;
91104 unsigned long expire;
91105diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
91106index 4f3a3c03..04b7886 100644
91107--- a/kernel/trace/blktrace.c
91108+++ b/kernel/trace/blktrace.c
91109@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
91110 struct blk_trace *bt = filp->private_data;
91111 char buf[16];
91112
91113- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
91114+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
91115
91116 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
91117 }
91118@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
91119 return 1;
91120
91121 bt = buf->chan->private_data;
91122- atomic_inc(&bt->dropped);
91123+ atomic_inc_unchecked(&bt->dropped);
91124 return 0;
91125 }
91126
91127@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
91128
91129 bt->dir = dir;
91130 bt->dev = dev;
91131- atomic_set(&bt->dropped, 0);
91132+ atomic_set_unchecked(&bt->dropped, 0);
91133 INIT_LIST_HEAD(&bt->running_list);
91134
91135 ret = -EIO;
91136diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
91137index 868633e..921dc41 100644
91138--- a/kernel/trace/ftrace.c
91139+++ b/kernel/trace/ftrace.c
91140@@ -1965,12 +1965,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
91141 if (unlikely(ftrace_disabled))
91142 return 0;
91143
91144+ ret = ftrace_arch_code_modify_prepare();
91145+ FTRACE_WARN_ON(ret);
91146+ if (ret)
91147+ return 0;
91148+
91149 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
91150+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
91151 if (ret) {
91152 ftrace_bug(ret, ip);
91153- return 0;
91154 }
91155- return 1;
91156+ return ret ? 0 : 1;
91157 }
91158
91159 /*
91160@@ -4177,8 +4182,10 @@ static int ftrace_process_locs(struct module *mod,
91161 if (!count)
91162 return 0;
91163
91164+ pax_open_kernel();
91165 sort(start, count, sizeof(*start),
91166 ftrace_cmp_ips, ftrace_swap_ips);
91167+ pax_close_kernel();
91168
91169 start_pg = ftrace_allocate_pages(count);
91170 if (!start_pg)
91171@@ -4890,8 +4897,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
91172 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
91173
91174 static int ftrace_graph_active;
91175-static struct notifier_block ftrace_suspend_notifier;
91176-
91177 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
91178 {
91179 return 0;
91180@@ -5067,6 +5072,10 @@ static void update_function_graph_func(void)
91181 ftrace_graph_entry = ftrace_graph_entry_test;
91182 }
91183
91184+static struct notifier_block ftrace_suspend_notifier = {
91185+ .notifier_call = ftrace_suspend_notifier_call
91186+};
91187+
91188 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
91189 trace_func_graph_ent_t entryfunc)
91190 {
91191@@ -5080,7 +5089,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
91192 goto out;
91193 }
91194
91195- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
91196 register_pm_notifier(&ftrace_suspend_notifier);
91197
91198 ftrace_graph_active++;
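
The ftrace_suspend_notifier hunk replaces a runtime assignment of .notifier_call with a static designated initializer; a write-once object is what lets the constify plugin keep the notifier_block read-only. The shape of the change as a self-contained sketch (struct reduced to the one member that matters here):

    #include <stdio.h>

    struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long action,
                             void *data);
    };

    static int suspend_call(struct notifier_block *nb, unsigned long action,
                            void *data)
    {
        (void)nb; (void)data;
        printf("pm event %lu\n", action);
        return 0;
    }

    /* after the patch: initialised at build time, so constifiable */
    static const struct notifier_block ftrace_suspend_notifier = {
        .notifier_call = suspend_call,
    };

    int main(void)
    {
        /* before the patch, registration did this assignment at runtime:
         *   ftrace_suspend_notifier.notifier_call = suspend_call;        */
        struct notifier_block *nb =
                (struct notifier_block *)&ftrace_suspend_notifier;
        return nb->notifier_call(nb, 1, NULL);
    }
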
91199diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
91200index fc4da2d..f3e800b 100644
91201--- a/kernel/trace/ring_buffer.c
91202+++ b/kernel/trace/ring_buffer.c
91203@@ -352,9 +352,9 @@ struct buffer_data_page {
91204 */
91205 struct buffer_page {
91206 struct list_head list; /* list of buffer pages */
91207- local_t write; /* index for next write */
91208+ local_unchecked_t write; /* index for next write */
91209 unsigned read; /* index for next read */
91210- local_t entries; /* entries on this page */
91211+ local_unchecked_t entries; /* entries on this page */
91212 unsigned long real_end; /* real end of data */
91213 struct buffer_data_page *page; /* Actual data page */
91214 };
91215@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
91216 unsigned long last_overrun;
91217 local_t entries_bytes;
91218 local_t entries;
91219- local_t overrun;
91220- local_t commit_overrun;
91221+ local_unchecked_t overrun;
91222+ local_unchecked_t commit_overrun;
91223 local_t dropped_events;
91224 local_t committing;
91225 local_t commits;
91226@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
91227 *
91228 * We add a counter to the write field to denote this.
91229 */
91230- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
91231- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
91232+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
91233+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
91234
91235 /*
91236 * Just make sure we have seen our old_write and synchronize
91237@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
91238 * cmpxchg to only update if an interrupt did not already
91239 * do it for us. If the cmpxchg fails, we don't care.
91240 */
91241- (void)local_cmpxchg(&next_page->write, old_write, val);
91242- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
91243+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
91244+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
91245
91246 /*
91247 * No need to worry about races with clearing out the commit.
91248@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
91249
91250 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
91251 {
91252- return local_read(&bpage->entries) & RB_WRITE_MASK;
91253+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
91254 }
91255
91256 static inline unsigned long rb_page_write(struct buffer_page *bpage)
91257 {
91258- return local_read(&bpage->write) & RB_WRITE_MASK;
91259+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
91260 }
91261
91262 static int
91263@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
91264 * bytes consumed in ring buffer from here.
91265 * Increment overrun to account for the lost events.
91266 */
91267- local_add(page_entries, &cpu_buffer->overrun);
91268+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
91269 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
91270 }
91271
91272@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
91273 * it is our responsibility to update
91274 * the counters.
91275 */
91276- local_add(entries, &cpu_buffer->overrun);
91277+ local_add_unchecked(entries, &cpu_buffer->overrun);
91278 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
91279
91280 /*
91281@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
91282 if (tail == BUF_PAGE_SIZE)
91283 tail_page->real_end = 0;
91284
91285- local_sub(length, &tail_page->write);
91286+ local_sub_unchecked(length, &tail_page->write);
91287 return;
91288 }
91289
91290@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
91291 rb_event_set_padding(event);
91292
91293 /* Set the write back to the previous setting */
91294- local_sub(length, &tail_page->write);
91295+ local_sub_unchecked(length, &tail_page->write);
91296 return;
91297 }
91298
91299@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
91300
91301 /* Set write to end of buffer */
91302 length = (tail + length) - BUF_PAGE_SIZE;
91303- local_sub(length, &tail_page->write);
91304+ local_sub_unchecked(length, &tail_page->write);
91305 }
91306
91307 /*
91308@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
91309 * about it.
91310 */
91311 if (unlikely(next_page == commit_page)) {
91312- local_inc(&cpu_buffer->commit_overrun);
91313+ local_inc_unchecked(&cpu_buffer->commit_overrun);
91314 goto out_reset;
91315 }
91316
91317@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
91318 cpu_buffer->tail_page) &&
91319 (cpu_buffer->commit_page ==
91320 cpu_buffer->reader_page))) {
91321- local_inc(&cpu_buffer->commit_overrun);
91322+ local_inc_unchecked(&cpu_buffer->commit_overrun);
91323 goto out_reset;
91324 }
91325 }
91326@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
91327 length += RB_LEN_TIME_EXTEND;
91328
91329 tail_page = cpu_buffer->tail_page;
91330- write = local_add_return(length, &tail_page->write);
91331+ write = local_add_return_unchecked(length, &tail_page->write);
91332
91333 /* set write to only the index of the write */
91334 write &= RB_WRITE_MASK;
91335@@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
91336 kmemcheck_annotate_bitfield(event, bitfield);
91337 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
91338
91339- local_inc(&tail_page->entries);
91340+ local_inc_unchecked(&tail_page->entries);
91341
91342 /*
91343 * If this is the first commit on the page, then update
91344@@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
91345
91346 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
91347 unsigned long write_mask =
91348- local_read(&bpage->write) & ~RB_WRITE_MASK;
91349+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
91350 unsigned long event_length = rb_event_length(event);
91351 /*
91352 * This is on the tail page. It is possible that
91353@@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
91354 */
91355 old_index += write_mask;
91356 new_index += write_mask;
91357- index = local_cmpxchg(&bpage->write, old_index, new_index);
91358+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
91359 if (index == old_index) {
91360 /* update counters */
91361 local_sub(event_length, &cpu_buffer->entries_bytes);
91362@@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
91363
91364 /* Do the likely case first */
91365 if (likely(bpage->page == (void *)addr)) {
91366- local_dec(&bpage->entries);
91367+ local_dec_unchecked(&bpage->entries);
91368 return;
91369 }
91370
91371@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
91372 start = bpage;
91373 do {
91374 if (bpage->page == (void *)addr) {
91375- local_dec(&bpage->entries);
91376+ local_dec_unchecked(&bpage->entries);
91377 return;
91378 }
91379 rb_inc_page(cpu_buffer, &bpage);
91380@@ -3146,7 +3146,7 @@ static inline unsigned long
91381 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
91382 {
91383 return local_read(&cpu_buffer->entries) -
91384- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
91385+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
91386 }
91387
91388 /**
91389@@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
91390 return 0;
91391
91392 cpu_buffer = buffer->buffers[cpu];
91393- ret = local_read(&cpu_buffer->overrun);
91394+ ret = local_read_unchecked(&cpu_buffer->overrun);
91395
91396 return ret;
91397 }
91398@@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
91399 return 0;
91400
91401 cpu_buffer = buffer->buffers[cpu];
91402- ret = local_read(&cpu_buffer->commit_overrun);
91403+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
91404
91405 return ret;
91406 }
91407@@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
91408 /* if you care about this being correct, lock the buffer */
91409 for_each_buffer_cpu(buffer, cpu) {
91410 cpu_buffer = buffer->buffers[cpu];
91411- overruns += local_read(&cpu_buffer->overrun);
91412+ overruns += local_read_unchecked(&cpu_buffer->overrun);
91413 }
91414
91415 return overruns;
91416@@ -3519,8 +3519,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
91417 /*
91418 * Reset the reader page to size zero.
91419 */
91420- local_set(&cpu_buffer->reader_page->write, 0);
91421- local_set(&cpu_buffer->reader_page->entries, 0);
91422+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
91423+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
91424 local_set(&cpu_buffer->reader_page->page->commit, 0);
91425 cpu_buffer->reader_page->real_end = 0;
91426
91427@@ -3554,7 +3554,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
91428 * want to compare with the last_overrun.
91429 */
91430 smp_mb();
91431- overwrite = local_read(&(cpu_buffer->overrun));
91432+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
91433
91434 /*
91435 * Here's the tricky part.
91436@@ -4124,8 +4124,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
91437
91438 cpu_buffer->head_page
91439 = list_entry(cpu_buffer->pages, struct buffer_page, list);
91440- local_set(&cpu_buffer->head_page->write, 0);
91441- local_set(&cpu_buffer->head_page->entries, 0);
91442+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
91443+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
91444 local_set(&cpu_buffer->head_page->page->commit, 0);
91445
91446 cpu_buffer->head_page->read = 0;
91447@@ -4135,14 +4135,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
91448
91449 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
91450 INIT_LIST_HEAD(&cpu_buffer->new_pages);
91451- local_set(&cpu_buffer->reader_page->write, 0);
91452- local_set(&cpu_buffer->reader_page->entries, 0);
91453+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
91454+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
91455 local_set(&cpu_buffer->reader_page->page->commit, 0);
91456 cpu_buffer->reader_page->read = 0;
91457
91458 local_set(&cpu_buffer->entries_bytes, 0);
91459- local_set(&cpu_buffer->overrun, 0);
91460- local_set(&cpu_buffer->commit_overrun, 0);
91461+ local_set_unchecked(&cpu_buffer->overrun, 0);
91462+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
91463 local_set(&cpu_buffer->dropped_events, 0);
91464 local_set(&cpu_buffer->entries, 0);
91465 local_set(&cpu_buffer->committing, 0);
91466@@ -4547,8 +4547,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
91467 rb_init_page(bpage);
91468 bpage = reader->page;
91469 reader->page = *data_page;
91470- local_set(&reader->write, 0);
91471- local_set(&reader->entries, 0);
91472+ local_set_unchecked(&reader->write, 0);
91473+ local_set_unchecked(&reader->entries, 0);
91474 reader->read = 0;
91475 *data_page = bpage;
91476
91477diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
91478index 24c1f23..781fd73f 100644
91479--- a/kernel/trace/trace.c
91480+++ b/kernel/trace/trace.c
91481@@ -3399,7 +3399,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
91482 return 0;
91483 }
91484
91485-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
91486+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
91487 {
91488 /* do nothing if flag is already set */
91489 if (!!(trace_flags & mask) == !!enabled)
91490diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
91491index 02b592f..f971546 100644
91492--- a/kernel/trace/trace.h
91493+++ b/kernel/trace/trace.h
91494@@ -1233,7 +1233,7 @@ extern const char *__stop___tracepoint_str[];
91495 void trace_printk_init_buffers(void);
91496 void trace_printk_start_comm(void);
91497 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
91498-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
91499+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
91500
91501 /*
91502 * Normal trace_printk() and friends allocates special buffers
91503diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
91504index 26dc348..8708ca7 100644
91505--- a/kernel/trace/trace_clock.c
91506+++ b/kernel/trace/trace_clock.c
91507@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
91508 return now;
91509 }
91510
91511-static atomic64_t trace_counter;
91512+static atomic64_unchecked_t trace_counter;
91513
91514 /*
91515 * trace_clock_counter(): simply an atomic counter.
91516@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
91517 */
91518 u64 notrace trace_clock_counter(void)
91519 {
91520- return atomic64_add_return(1, &trace_counter);
91521+ return atomic64_inc_return_unchecked(&trace_counter);
91522 }
91523diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
91524index 7b16d40..1b2875d 100644
91525--- a/kernel/trace/trace_events.c
91526+++ b/kernel/trace/trace_events.c
91527@@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
91528 return 0;
91529 }
91530
91531-struct ftrace_module_file_ops;
91532 static void __add_event_to_tracers(struct ftrace_event_call *call);
91533
91534 /* Add an additional event_call dynamically */
91535diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
91536index 0abd9b8..6a663a2 100644
91537--- a/kernel/trace/trace_mmiotrace.c
91538+++ b/kernel/trace/trace_mmiotrace.c
91539@@ -24,7 +24,7 @@ struct header_iter {
91540 static struct trace_array *mmio_trace_array;
91541 static bool overrun_detected;
91542 static unsigned long prev_overruns;
91543-static atomic_t dropped_count;
91544+static atomic_unchecked_t dropped_count;
91545
91546 static void mmio_reset_data(struct trace_array *tr)
91547 {
91548@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
91549
91550 static unsigned long count_overruns(struct trace_iterator *iter)
91551 {
91552- unsigned long cnt = atomic_xchg(&dropped_count, 0);
91553+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
91554 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
91555
91556 if (over > prev_overruns)
91557@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
91558 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
91559 sizeof(*entry), 0, pc);
91560 if (!event) {
91561- atomic_inc(&dropped_count);
91562+ atomic_inc_unchecked(&dropped_count);
91563 return;
91564 }
91565 entry = ring_buffer_event_data(event);
91566@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
91567 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
91568 sizeof(*entry), 0, pc);
91569 if (!event) {
91570- atomic_inc(&dropped_count);
91571+ atomic_inc_unchecked(&dropped_count);
91572 return;
91573 }
91574 entry = ring_buffer_event_data(event);
91575diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
91576index ed32284..884d6c3 100644
91577--- a/kernel/trace/trace_output.c
91578+++ b/kernel/trace/trace_output.c
91579@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
91580
91581 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
91582 if (!IS_ERR(p)) {
91583- p = mangle_path(s->buffer + s->len, p, "\n");
91584+ p = mangle_path(s->buffer + s->len, p, "\n\\");
91585 if (p) {
91586 s->len = p - s->buffer;
91587 return 1;
91588@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event)
91589 goto out;
91590 }
91591
91592+ pax_open_kernel();
91593 if (event->funcs->trace == NULL)
91594- event->funcs->trace = trace_nop_print;
91595+ *(void **)&event->funcs->trace = trace_nop_print;
91596 if (event->funcs->raw == NULL)
91597- event->funcs->raw = trace_nop_print;
91598+ *(void **)&event->funcs->raw = trace_nop_print;
91599 if (event->funcs->hex == NULL)
91600- event->funcs->hex = trace_nop_print;
91601+ *(void **)&event->funcs->hex = trace_nop_print;
91602 if (event->funcs->binary == NULL)
91603- event->funcs->binary = trace_nop_print;
91604+ *(void **)&event->funcs->binary = trace_nop_print;
91605+ pax_close_kernel();
91606
91607 key = event->type & (EVENT_HASHSIZE - 1);
91608
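
register_ftrace_event() fills in default callbacks on a struct the patched tree treats as const, so plain member assignment no longer compiles; the hunk writes through a *(void **)& cast instead, inside a pax_open_kernel() window. A userspace model of just the cast idiom (the page-permission half is omitted; the function-pointer/void * conversion is the same non-standard-but-ubiquitous liberty the patch itself takes):

    #include <stdio.h>

    struct trace_event_functions {
        void (*trace)(void);
    };

    static void trace_nop_print(void) { puts("nop"); }

    static struct trace_event_functions funcs;  /* const in the patched tree */

    int main(void)
    {
        const struct trace_event_functions *f = &funcs;

        if (f->trace == NULL)                   /* default a missing callback */
            *(void **)&((struct trace_event_functions *)f)->trace =
                    (void *)trace_nop_print;

        f->trace();
        return 0;
    }
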
91609diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
91610index e6be585..d73ae5e 100644
91611--- a/kernel/trace/trace_stack.c
91612+++ b/kernel/trace/trace_stack.c
91613@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
91614 return;
91615
91616 /* we do not handle interrupt stacks yet */
91617- if (!object_is_on_stack(stack))
91618+ if (!object_starts_on_stack(stack))
91619 return;
91620
91621 local_irq_save(flags);
91622diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
91623index 80a57af..7f5a7ff 100644
91624--- a/kernel/user_namespace.c
91625+++ b/kernel/user_namespace.c
91626@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
91627 !kgid_has_mapping(parent_ns, group))
91628 return -EPERM;
91629
91630+#ifdef CONFIG_GRKERNSEC
91631+ /*
91632+ * This doesn't really inspire confidence:
91633+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
91634+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
91635+ * Increases kernel attack surface in areas developers
91636+ * previously cared little about ("low importance due
91637+ * to requiring "root" capability")
91638+ * To be removed when this code receives *proper* review
91639+ */
91640+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
91641+ !capable(CAP_SETGID))
91642+ return -EPERM;
91643+#endif
91644+
91645 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
91646 if (!ns)
91647 return -ENOMEM;
91648@@ -865,7 +880,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
91649 if (atomic_read(&current->mm->mm_users) > 1)
91650 return -EINVAL;
91651
91652- if (current->fs->users != 1)
91653+ if (atomic_read(&current->fs->users) != 1)
91654 return -EINVAL;
91655
91656 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
91657diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
91658index 4f69f9a..7c6f8f8 100644
91659--- a/kernel/utsname_sysctl.c
91660+++ b/kernel/utsname_sysctl.c
91661@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
91662 static int proc_do_uts_string(ctl_table *table, int write,
91663 void __user *buffer, size_t *lenp, loff_t *ppos)
91664 {
91665- struct ctl_table uts_table;
91666+ ctl_table_no_const uts_table;
91667 int r;
91668 memcpy(&uts_table, table, sizeof(uts_table));
91669 uts_table.data = get_uts(table, write);
91670diff --git a/kernel/watchdog.c b/kernel/watchdog.c
91671index 4431610..4265616 100644
91672--- a/kernel/watchdog.c
91673+++ b/kernel/watchdog.c
91674@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
91675 static void watchdog_nmi_disable(unsigned int cpu) { return; }
91676 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
91677
91678-static struct smp_hotplug_thread watchdog_threads = {
91679+static struct smp_hotplug_thread watchdog_threads __read_only = {
91680 .store = &softlockup_watchdog,
91681 .thread_should_run = watchdog_should_run,
91682 .thread_fn = watchdog,
91683diff --git a/kernel/workqueue.c b/kernel/workqueue.c
91684index b6a3941..b68f191 100644
91685--- a/kernel/workqueue.c
91686+++ b/kernel/workqueue.c
91687@@ -4702,7 +4702,7 @@ static void rebind_workers(struct worker_pool *pool)
91688 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
91689 worker_flags |= WORKER_REBOUND;
91690 worker_flags &= ~WORKER_UNBOUND;
91691- ACCESS_ONCE(worker->flags) = worker_flags;
91692+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
91693 }
91694
91695 spin_unlock_irq(&pool->lock);
91696diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
91697index a48abea..e108def 100644
91698--- a/lib/Kconfig.debug
91699+++ b/lib/Kconfig.debug
91700@@ -854,7 +854,7 @@ config DEBUG_MUTEXES
91701
91702 config DEBUG_WW_MUTEX_SLOWPATH
91703 bool "Wait/wound mutex debugging: Slowpath testing"
91704- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
91705+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
91706 select DEBUG_LOCK_ALLOC
91707 select DEBUG_SPINLOCK
91708 select DEBUG_MUTEXES
91709@@ -867,7 +867,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
91710
91711 config DEBUG_LOCK_ALLOC
91712 bool "Lock debugging: detect incorrect freeing of live locks"
91713- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
91714+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
91715 select DEBUG_SPINLOCK
91716 select DEBUG_MUTEXES
91717 select LOCKDEP
91718@@ -881,7 +881,7 @@ config DEBUG_LOCK_ALLOC
91719
91720 config PROVE_LOCKING
91721 bool "Lock debugging: prove locking correctness"
91722- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
91723+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
91724 select LOCKDEP
91725 select DEBUG_SPINLOCK
91726 select DEBUG_MUTEXES
91727@@ -932,7 +932,7 @@ config LOCKDEP
91728
91729 config LOCK_STAT
91730 bool "Lock usage statistics"
91731- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
91732+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
91733 select LOCKDEP
91734 select DEBUG_SPINLOCK
91735 select DEBUG_MUTEXES
91736@@ -1394,6 +1394,7 @@ config LATENCYTOP
91737 depends on DEBUG_KERNEL
91738 depends on STACKTRACE_SUPPORT
91739 depends on PROC_FS
91740+ depends on !GRKERNSEC_HIDESYM
91741 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
91742 select KALLSYMS
91743 select KALLSYMS_ALL
91744@@ -1410,7 +1411,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
91745 config DEBUG_STRICT_USER_COPY_CHECKS
91746 bool "Strict user copy size checks"
91747 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
91748- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
91749+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
91750 help
91751 Enabling this option turns a certain set of sanity checks for user
91752 copy operations into compile time failures.
91753@@ -1529,7 +1530,7 @@ endmenu # runtime tests
91754
91755 config PROVIDE_OHCI1394_DMA_INIT
91756 bool "Remote debugging over FireWire early on boot"
91757- depends on PCI && X86
91758+ depends on PCI && X86 && !GRKERNSEC
91759 help
91760 If you want to debug problems which hang or crash the kernel early
91761 on boot and the crashing machine has a FireWire port, you can use
91762diff --git a/lib/Makefile b/lib/Makefile
91763index 48140e3..de854e5 100644
91764--- a/lib/Makefile
91765+++ b/lib/Makefile
91766@@ -52,7 +52,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
91767 obj-$(CONFIG_BTREE) += btree.o
91768 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
91769 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
91770-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
91771+obj-y += list_debug.o
91772 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
91773
91774 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
91775diff --git a/lib/average.c b/lib/average.c
91776index 114d1be..ab0350c 100644
91777--- a/lib/average.c
91778+++ b/lib/average.c
91779@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
91780 {
91781 unsigned long internal = ACCESS_ONCE(avg->internal);
91782
91783- ACCESS_ONCE(avg->internal) = internal ?
91784+ ACCESS_ONCE_RW(avg->internal) = internal ?
91785 (((internal << avg->weight) - internal) +
91786 (val << avg->factor)) >> avg->weight :
91787 (val << avg->factor);
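
For context, the expression wrapped here is lib/average.c's fixed-point EWMA: internal stores the average scaled by 2^factor, and each sample is folded in with weight 1/2^weight, i.e. avg += (val - avg) / 2^weight. A standalone sketch of the same arithmetic, with the struct layout simplified for illustration:

#include <stdio.h>

struct ewma { unsigned long internal, factor, weight; };

/* internal = (internal * (2^weight - 1) + (val << factor)) >> weight,
 * or (val << factor) on the very first sample */
static void ewma_add(struct ewma *avg, unsigned long val)
{
        avg->internal = avg->internal
                ? (((avg->internal << avg->weight) - avg->internal) +
                   (val << avg->factor)) >> avg->weight
                : (val << avg->factor);
}

int main(void)
{
        struct ewma e = { 0, 10, 4 };   /* scale 2^10, alpha = 1/16 */
        unsigned long samples[] = { 100, 100, 200, 200, 200 };

        for (unsigned int i = 0; i < 5; i++) {
                ewma_add(&e, samples[i]);
                printf("avg = %lu\n", e.internal >> e.factor);
        }
        return 0;
}
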
91788diff --git a/lib/bitmap.c b/lib/bitmap.c
91789index 06f7e4f..f3cf2b0 100644
91790--- a/lib/bitmap.c
91791+++ b/lib/bitmap.c
91792@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
91793 {
91794 int c, old_c, totaldigits, ndigits, nchunks, nbits;
91795 u32 chunk;
91796- const char __user __force *ubuf = (const char __user __force *)buf;
91797+ const char __user *ubuf = (const char __force_user *)buf;
91798
91799 bitmap_zero(maskp, nmaskbits);
91800
91801@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
91802 {
91803 if (!access_ok(VERIFY_READ, ubuf, ulen))
91804 return -EFAULT;
91805- return __bitmap_parse((const char __force *)ubuf,
91806+ return __bitmap_parse((const char __force_kernel *)ubuf,
91807 ulen, 1, maskp, nmaskbits);
91808
91809 }
91810@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
91811 {
91812 unsigned a, b;
91813 int c, old_c, totaldigits;
91814- const char __user __force *ubuf = (const char __user __force *)buf;
91815+ const char __user *ubuf = (const char __force_user *)buf;
91816 int exp_digit, in_range;
91817
91818 totaldigits = c = 0;
91819@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
91820 {
91821 if (!access_ok(VERIFY_READ, ubuf, ulen))
91822 return -EFAULT;
91823- return __bitmap_parselist((const char __force *)ubuf,
91824+ return __bitmap_parselist((const char __force_kernel *)ubuf,
91825 ulen, 1, maskp, nmaskbits);
91826 }
91827 EXPORT_SYMBOL(bitmap_parselist_user);
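
The __force_user/__force_kernel annotations used above are grsecurity shorthands for sparse, the kernel's static checker; they combine the address-space attribute with an explicit cast override. A sketch of how such annotations are typically defined (the exact grsecurity definitions are assumed, and everything expands to nothing under a regular compiler):

#include <stdio.h>

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
# define __force_user   __force __user
# define __force_kernel __force
#else
# define __user
# define __force
# define __force_user
# define __force_kernel
#endif

/* a deliberate, checker-visible crossing of the user/kernel boundary */
static inline const char *as_kernel_ptr(const char __user *ubuf)
{
        return (const char __force_kernel *)ubuf;
}

int main(void)
{
        const char msg[] = "ok";

        printf("%s\n", as_kernel_ptr(msg));
        return 0;
}
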
91828diff --git a/lib/bug.c b/lib/bug.c
91829index 1686034..a9c00c8 100644
91830--- a/lib/bug.c
91831+++ b/lib/bug.c
91832@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
91833 return BUG_TRAP_TYPE_NONE;
91834
91835 bug = find_bug(bugaddr);
91836+ if (!bug)
91837+ return BUG_TRAP_TYPE_NONE;
91838
91839 file = NULL;
91840 line = 0;
91841diff --git a/lib/debugobjects.c b/lib/debugobjects.c
91842index e0731c3..ad66444 100644
91843--- a/lib/debugobjects.c
91844+++ b/lib/debugobjects.c
91845@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
91846 if (limit > 4)
91847 return;
91848
91849- is_on_stack = object_is_on_stack(addr);
91850+ is_on_stack = object_starts_on_stack(addr);
91851 if (is_on_stack == onstack)
91852 return;
91853
91854diff --git a/lib/devres.c b/lib/devres.c
91855index 8235331..5881053 100644
91856--- a/lib/devres.c
91857+++ b/lib/devres.c
91858@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
91859 void devm_iounmap(struct device *dev, void __iomem *addr)
91860 {
91861 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
91862- (void *)addr));
91863+ (void __force *)addr));
91864 iounmap(addr);
91865 }
91866 EXPORT_SYMBOL(devm_iounmap);
91867@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
91868 {
91869 ioport_unmap(addr);
91870 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
91871- devm_ioport_map_match, (void *)addr));
91872+ devm_ioport_map_match, (void __force *)addr));
91873 }
91874 EXPORT_SYMBOL(devm_ioport_unmap);
91875 #endif /* CONFIG_HAS_IOPORT */
91876diff --git a/lib/div64.c b/lib/div64.c
91877index 4382ad7..08aa558 100644
91878--- a/lib/div64.c
91879+++ b/lib/div64.c
91880@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
91881 EXPORT_SYMBOL(__div64_32);
91882
91883 #ifndef div_s64_rem
91884-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
91885+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
91886 {
91887 u64 quotient;
91888
91889@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
91890 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
91891 */
91892 #ifndef div64_u64
91893-u64 div64_u64(u64 dividend, u64 divisor)
91894+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
91895 {
91896 u32 high = divisor >> 32;
91897 u64 quot;
91898diff --git a/lib/dma-debug.c b/lib/dma-debug.c
91899index 98f2d7e..899da5c 100644
91900--- a/lib/dma-debug.c
91901+++ b/lib/dma-debug.c
91902@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
91903
91904 void dma_debug_add_bus(struct bus_type *bus)
91905 {
91906- struct notifier_block *nb;
91907+ notifier_block_no_const *nb;
91908
91909 if (global_disable)
91910 return;
91911@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
91912
91913 static void check_for_stack(struct device *dev, void *addr)
91914 {
91915- if (object_is_on_stack(addr))
91916+ if (object_starts_on_stack(addr))
91917 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
91918 "stack [addr=%p]\n", addr);
91919 }
91920diff --git a/lib/hash.c b/lib/hash.c
91921index fea973f..386626f 100644
91922--- a/lib/hash.c
91923+++ b/lib/hash.c
91924@@ -14,7 +14,7 @@
91925 #include <linux/hash.h>
91926 #include <linux/cache.h>
91927
91928-static struct fast_hash_ops arch_hash_ops __read_mostly = {
91929+static struct fast_hash_ops arch_hash_ops __read_only = {
91930 .hash = jhash,
91931 .hash2 = jhash2,
91932 };
91933diff --git a/lib/inflate.c b/lib/inflate.c
91934index 013a761..c28f3fc 100644
91935--- a/lib/inflate.c
91936+++ b/lib/inflate.c
91937@@ -269,7 +269,7 @@ static void free(void *where)
91938 malloc_ptr = free_mem_ptr;
91939 }
91940 #else
91941-#define malloc(a) kmalloc(a, GFP_KERNEL)
91942+#define malloc(a) kmalloc((a), GFP_KERNEL)
91943 #define free(a) kfree(a)
91944 #endif
91945
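
The one-character change above only parenthesizes the macro argument. It is harmless for kmalloc's argument list, but it follows the general rule that unparenthesized macro arguments break under operator precedence; a minimal demonstration:

#include <stdio.h>

#define DOUBLE_BAD(a)  (a * 2)    /* argument not parenthesized */
#define DOUBLE_GOOD(a) ((a) * 2)

int main(void)
{
        /* With a compound argument the two expand differently:
         * DOUBLE_BAD(8 + 8)  -> 8 + 8 * 2   == 24
         * DOUBLE_GOOD(8 + 8) -> (8 + 8) * 2 == 32 */
        printf("%d %d\n", DOUBLE_BAD(8 + 8), DOUBLE_GOOD(8 + 8));
        return 0;
}
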
91946diff --git a/lib/ioremap.c b/lib/ioremap.c
91947index 0c9216c..863bd89 100644
91948--- a/lib/ioremap.c
91949+++ b/lib/ioremap.c
91950@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
91951 unsigned long next;
91952
91953 phys_addr -= addr;
91954- pmd = pmd_alloc(&init_mm, pud, addr);
91955+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
91956 if (!pmd)
91957 return -ENOMEM;
91958 do {
91959@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
91960 unsigned long next;
91961
91962 phys_addr -= addr;
91963- pud = pud_alloc(&init_mm, pgd, addr);
91964+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
91965 if (!pud)
91966 return -ENOMEM;
91967 do {
91968diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
91969index bd2bea9..6b3c95e 100644
91970--- a/lib/is_single_threaded.c
91971+++ b/lib/is_single_threaded.c
91972@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
91973 struct task_struct *p, *t;
91974 bool ret;
91975
91976+ if (!mm)
91977+ return true;
91978+
91979 if (atomic_read(&task->signal->live) != 1)
91980 return false;
91981
91982diff --git a/lib/kobject.c b/lib/kobject.c
91983index cb14aea..8c53cdb 100644
91984--- a/lib/kobject.c
91985+++ b/lib/kobject.c
91986@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
91987
91988
91989 static DEFINE_SPINLOCK(kobj_ns_type_lock);
91990-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
91991+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
91992
91993-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
91994+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
91995 {
91996 enum kobj_ns_type type = ops->type;
91997 int error;
91998diff --git a/lib/list_debug.c b/lib/list_debug.c
91999index c24c2f7..f0296f4 100644
92000--- a/lib/list_debug.c
92001+++ b/lib/list_debug.c
92002@@ -11,7 +11,9 @@
92003 #include <linux/bug.h>
92004 #include <linux/kernel.h>
92005 #include <linux/rculist.h>
92006+#include <linux/mm.h>
92007
92008+#ifdef CONFIG_DEBUG_LIST
92009 /*
92010 * Insert a new entry between two known consecutive entries.
92011 *
92012@@ -19,21 +21,40 @@
92013 * the prev/next entries already!
92014 */
92015
92016+static bool __list_add_debug(struct list_head *new,
92017+ struct list_head *prev,
92018+ struct list_head *next)
92019+{
92020+ if (unlikely(next->prev != prev)) {
92021+ printk(KERN_ERR "list_add corruption. next->prev should be "
92022+ "prev (%p), but was %p. (next=%p).\n",
92023+ prev, next->prev, next);
92024+ BUG();
92025+ return false;
92026+ }
92027+ if (unlikely(prev->next != next)) {
92028+ printk(KERN_ERR "list_add corruption. prev->next should be "
92029+ "next (%p), but was %p. (prev=%p).\n",
92030+ next, prev->next, prev);
92031+ BUG();
92032+ return false;
92033+ }
92034+ if (unlikely(new == prev || new == next)) {
92035+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
92036+ new, prev, next);
92037+ BUG();
92038+ return false;
92039+ }
92040+ return true;
92041+}
92042+
92043 void __list_add(struct list_head *new,
92044- struct list_head *prev,
92045- struct list_head *next)
92046+ struct list_head *prev,
92047+ struct list_head *next)
92048 {
92049- WARN(next->prev != prev,
92050- "list_add corruption. next->prev should be "
92051- "prev (%p), but was %p. (next=%p).\n",
92052- prev, next->prev, next);
92053- WARN(prev->next != next,
92054- "list_add corruption. prev->next should be "
92055- "next (%p), but was %p. (prev=%p).\n",
92056- next, prev->next, prev);
92057- WARN(new == prev || new == next,
92058- "list_add double add: new=%p, prev=%p, next=%p.\n",
92059- new, prev, next);
92060+ if (!__list_add_debug(new, prev, next))
92061+ return;
92062+
92063 next->prev = new;
92064 new->next = next;
92065 new->prev = prev;
92066@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
92067 }
92068 EXPORT_SYMBOL(__list_add);
92069
92070-void __list_del_entry(struct list_head *entry)
92071+static bool __list_del_entry_debug(struct list_head *entry)
92072 {
92073 struct list_head *prev, *next;
92074
92075 prev = entry->prev;
92076 next = entry->next;
92077
92078- if (WARN(next == LIST_POISON1,
92079- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
92080- entry, LIST_POISON1) ||
92081- WARN(prev == LIST_POISON2,
92082- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
92083- entry, LIST_POISON2) ||
92084- WARN(prev->next != entry,
92085- "list_del corruption. prev->next should be %p, "
92086- "but was %p\n", entry, prev->next) ||
92087- WARN(next->prev != entry,
92088- "list_del corruption. next->prev should be %p, "
92089- "but was %p\n", entry, next->prev))
92090+ if (unlikely(next == LIST_POISON1)) {
92091+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
92092+ entry, LIST_POISON1);
92093+ BUG();
92094+ return false;
92095+ }
92096+ if (unlikely(prev == LIST_POISON2)) {
92097+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
92098+ entry, LIST_POISON2);
92099+ BUG();
92100+ return false;
92101+ }
92102+ if (unlikely(entry->prev->next != entry)) {
92103+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
92104+ "but was %p\n", entry, prev->next);
92105+ BUG();
92106+ return false;
92107+ }
92108+ if (unlikely(entry->next->prev != entry)) {
92109+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
92110+ "but was %p\n", entry, next->prev);
92111+ BUG();
92112+ return false;
92113+ }
92114+ return true;
92115+}
92116+
92117+void __list_del_entry(struct list_head *entry)
92118+{
92119+ if (!__list_del_entry_debug(entry))
92120 return;
92121
92122- __list_del(prev, next);
92123+ __list_del(entry->prev, entry->next);
92124 }
92125 EXPORT_SYMBOL(__list_del_entry);
92126
92127@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
92128 void __list_add_rcu(struct list_head *new,
92129 struct list_head *prev, struct list_head *next)
92130 {
92131- WARN(next->prev != prev,
92132- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
92133- prev, next->prev, next);
92134- WARN(prev->next != next,
92135- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
92136- next, prev->next, prev);
92137+ if (!__list_add_debug(new, prev, next))
92138+ return;
92139+
92140 new->next = next;
92141 new->prev = prev;
92142 rcu_assign_pointer(list_next_rcu(prev), new);
92143 next->prev = new;
92144 }
92145 EXPORT_SYMBOL(__list_add_rcu);
92146+#endif
92147+
92148+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
92149+{
92150+#ifdef CONFIG_DEBUG_LIST
92151+ if (!__list_add_debug(new, prev, next))
92152+ return;
92153+#endif
92154+
92155+ pax_open_kernel();
92156+ next->prev = new;
92157+ new->next = next;
92158+ new->prev = prev;
92159+ prev->next = new;
92160+ pax_close_kernel();
92161+}
92162+EXPORT_SYMBOL(__pax_list_add);
92163+
92164+void pax_list_del(struct list_head *entry)
92165+{
92166+#ifdef CONFIG_DEBUG_LIST
92167+ if (!__list_del_entry_debug(entry))
92168+ return;
92169+#endif
92170+
92171+ pax_open_kernel();
92172+ __list_del(entry->prev, entry->next);
92173+ entry->next = LIST_POISON1;
92174+ entry->prev = LIST_POISON2;
92175+ pax_close_kernel();
92176+}
92177+EXPORT_SYMBOL(pax_list_del);
92178+
92179+void pax_list_del_init(struct list_head *entry)
92180+{
92181+ pax_open_kernel();
92182+ __list_del(entry->prev, entry->next);
92183+ INIT_LIST_HEAD(entry);
92184+ pax_close_kernel();
92185+}
92186+EXPORT_SYMBOL(pax_list_del_init);
92187+
92188+void __pax_list_add_rcu(struct list_head *new,
92189+ struct list_head *prev, struct list_head *next)
92190+{
92191+#ifdef CONFIG_DEBUG_LIST
92192+ if (!__list_add_debug(new, prev, next))
92193+ return;
92194+#endif
92195+
92196+ pax_open_kernel();
92197+ new->next = next;
92198+ new->prev = prev;
92199+ rcu_assign_pointer(list_next_rcu(prev), new);
92200+ next->prev = new;
92201+ pax_close_kernel();
92202+}
92203+EXPORT_SYMBOL(__pax_list_add_rcu);
92204+
92205+void pax_list_del_rcu(struct list_head *entry)
92206+{
92207+#ifdef CONFIG_DEBUG_LIST
92208+ if (!__list_del_entry_debug(entry))
92209+ return;
92210+#endif
92211+
92212+ pax_open_kernel();
92213+ __list_del(entry->prev, entry->next);
92214+ entry->next = LIST_POISON1;
92215+ entry->prev = LIST_POISON2;
92216+ pax_close_kernel();
92217+}
92218+EXPORT_SYMBOL(pax_list_del_rcu);
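
The pax_list_* helpers added here wrap the pointer writes in pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (e.g. by toggling CR0.WP on x86) so that list nodes living in constified, read-only data can still be linked and unlinked. A conceptual user-space analogue built on mprotect(); this is purely illustrative, the kernel mechanism is different:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* stand-ins for pax_open_kernel()/pax_close_kernel() on one page */
static void open_rw(void *page, size_t len)  { mprotect(page, len, PROT_READ | PROT_WRITE); }
static void close_ro(void *page, size_t len) { mprotect(page, len, PROT_READ); }

int main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "node");
        close_ro(p, pg);          /* data is now read-only */

        open_rw(p, pg);           /* briefly open the window... */
        p[0] = 'N';               /* ...do the one sanctioned write */
        close_ro(p, pg);          /* ...and close it again */

        printf("%s\n", p);
        munmap(p, pg);
        return 0;
}
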
92219diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
92220index 963b703..438bc51 100644
92221--- a/lib/percpu-refcount.c
92222+++ b/lib/percpu-refcount.c
92223@@ -29,7 +29,7 @@
92224 * can't hit 0 before we've added up all the percpu refs.
92225 */
92226
92227-#define PCPU_COUNT_BIAS (1U << 31)
92228+#define PCPU_COUNT_BIAS (1U << 30)
92229
92230 /**
92231 * percpu_ref_init - initialize a percpu refcount
92232diff --git a/lib/radix-tree.c b/lib/radix-tree.c
92233index bd4a8df..9e4804f 100644
92234--- a/lib/radix-tree.c
92235+++ b/lib/radix-tree.c
92236@@ -93,7 +93,7 @@ struct radix_tree_preload {
92237 int nr;
92238 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
92239 };
92240-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
92241+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
92242
92243 static inline void *ptr_to_indirect(void *ptr)
92244 {
92245diff --git a/lib/random32.c b/lib/random32.c
92246index 6148967..009bfe8 100644
92247--- a/lib/random32.c
92248+++ b/lib/random32.c
92249@@ -44,7 +44,7 @@
92250 static void __init prandom_state_selftest(void);
92251 #endif
92252
92253-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
92254+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
92255
92256 /**
92257 * prandom_u32_state - seeded pseudo-random number generator.
92258diff --git a/lib/rbtree.c b/lib/rbtree.c
92259index 65f4eff..2cfa167 100644
92260--- a/lib/rbtree.c
92261+++ b/lib/rbtree.c
92262@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
92263 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
92264
92265 static const struct rb_augment_callbacks dummy_callbacks = {
92266- dummy_propagate, dummy_copy, dummy_rotate
92267+ .propagate = dummy_propagate,
92268+ .copy = dummy_copy,
92269+ .rotate = dummy_rotate
92270 };
92271
92272 void rb_insert_color(struct rb_node *node, struct rb_root *root)
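
The dummy_callbacks conversion above is mechanical but load-bearing: grsecurity's gcc plugins, most likely the structure-layout randomization, can reorder struct fields, so positional initializers stop matching the declaration order and designated initializers become mandatory. The difference in plain C:

#include <stdio.h>

struct ops {
        void (*propagate)(int);
        void (*copy)(int);
        void (*rotate)(int);
};

static void nop(int x) { (void)x; }

/* A positional initializer { nop, nop, nop } silently binds by field
 * order and breaks if the fields are ever reordered; the designated
 * form keeps each callback bound to its name. */
static const struct ops dummy_ops = {
        .propagate = nop,
        .copy      = nop,
        .rotate    = nop,
};

int main(void)
{
        dummy_ops.rotate(0);
        printf("ok\n");
        return 0;
}
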
92273diff --git a/lib/show_mem.c b/lib/show_mem.c
92274index 0922579..9d7adb9 100644
92275--- a/lib/show_mem.c
92276+++ b/lib/show_mem.c
92277@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
92278 quicklist_total_size());
92279 #endif
92280 #ifdef CONFIG_MEMORY_FAILURE
92281- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
92282+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
92283 #endif
92284 }
92285diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
92286index bb2b201..46abaf9 100644
92287--- a/lib/strncpy_from_user.c
92288+++ b/lib/strncpy_from_user.c
92289@@ -21,7 +21,7 @@
92290 */
92291 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
92292 {
92293- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
92294+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
92295 long res = 0;
92296
92297 /*
92298diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
92299index a28df52..3d55877 100644
92300--- a/lib/strnlen_user.c
92301+++ b/lib/strnlen_user.c
92302@@ -26,7 +26,7 @@
92303 */
92304 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
92305 {
92306- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
92307+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
92308 long align, res = 0;
92309 unsigned long c;
92310
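
Both word-at-a-time helpers get the same one-word change: a plain const local is still materialized on the stack at every call, whereas static const places the constants in .rodata once. A small illustration of the difference:

#include <stdio.h>

struct constants { unsigned long low, high; };

static long f_stack(void)
{
        /* built on the stack on every call */
        const struct constants c = { 0x01010101UL, 0x80808080UL };
        return (long)(c.low + c.high);
}

static long f_rodata(void)
{
        /* emitted once into .rodata; no per-call setup */
        static const struct constants c = { 0x01010101UL, 0x80808080UL };
        return (long)(c.low + c.high);
}

int main(void)
{
        printf("%ld %ld\n", f_stack(), f_rodata());
        return 0;
}
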
92311diff --git a/lib/swiotlb.c b/lib/swiotlb.c
92312index b604b83..c0547f6 100644
92313--- a/lib/swiotlb.c
92314+++ b/lib/swiotlb.c
92315@@ -674,7 +674,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
92316
92317 void
92318 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
92319- dma_addr_t dev_addr)
92320+ dma_addr_t dev_addr, struct dma_attrs *attrs)
92321 {
92322 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
92323
92324diff --git a/lib/usercopy.c b/lib/usercopy.c
92325index 4f5b1dd..7cab418 100644
92326--- a/lib/usercopy.c
92327+++ b/lib/usercopy.c
92328@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
92329 WARN(1, "Buffer overflow detected!\n");
92330 }
92331 EXPORT_SYMBOL(copy_from_user_overflow);
92332+
92333+void copy_to_user_overflow(void)
92334+{
92335+ WARN(1, "Buffer overflow detected!\n");
92336+}
92337+EXPORT_SYMBOL(copy_to_user_overflow);
92338diff --git a/lib/vsprintf.c b/lib/vsprintf.c
92339index 185b6d3..823c48c 100644
92340--- a/lib/vsprintf.c
92341+++ b/lib/vsprintf.c
92342@@ -16,6 +16,9 @@
92343 * - scnprintf and vscnprintf
92344 */
92345
92346+#ifdef CONFIG_GRKERNSEC_HIDESYM
92347+#define __INCLUDED_BY_HIDESYM 1
92348+#endif
92349 #include <stdarg.h>
92350 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
92351 #include <linux/types.h>
92352@@ -1179,7 +1182,11 @@ char *address_val(char *buf, char *end, const void *addr,
92353 return number(buf, end, num, spec);
92354 }
92355
92356+#ifdef CONFIG_GRKERNSEC_HIDESYM
92357+int kptr_restrict __read_mostly = 2;
92358+#else
92359 int kptr_restrict __read_mostly;
92360+#endif
92361
92362 /*
92363 * Show a '%p' thing. A kernel extension is that the '%p' is followed
92364@@ -1192,6 +1199,7 @@ int kptr_restrict __read_mostly;
92365 * - 'f' For simple symbolic function names without offset
92366 * - 'S' For symbolic direct pointers with offset
92367 * - 's' For symbolic direct pointers without offset
92368+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
92369 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
92370 * - 'B' For backtraced symbolic direct pointers with offset
92371 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
92372@@ -1259,12 +1267,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
92373
92374 if (!ptr && *fmt != 'K') {
92375 /*
92376- * Print (null) with the same width as a pointer so it makes
92377+ * Print (nil) with the same width as a pointer so it makes
92378 * tabular output look nice.
92379 */
92380 if (spec.field_width == -1)
92381 spec.field_width = default_width;
92382- return string(buf, end, "(null)", spec);
92383+ return string(buf, end, "(nil)", spec);
92384 }
92385
92386 switch (*fmt) {
92387@@ -1274,6 +1282,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
92388 /* Fallthrough */
92389 case 'S':
92390 case 's':
92391+#ifdef CONFIG_GRKERNSEC_HIDESYM
92392+ break;
92393+#else
92394+ return symbol_string(buf, end, ptr, spec, fmt);
92395+#endif
92396+ case 'A':
92397 case 'B':
92398 return symbol_string(buf, end, ptr, spec, fmt);
92399 case 'R':
92400@@ -1329,6 +1343,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
92401 va_end(va);
92402 return buf;
92403 }
92404+ case 'P':
92405+ break;
92406 case 'K':
92407 /*
92408 * %pK cannot be used in IRQ context because its test
92409@@ -1386,6 +1402,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
92410 ((const struct file *)ptr)->f_path.dentry,
92411 spec, fmt);
92412 }
92413+
92414+#ifdef CONFIG_GRKERNSEC_HIDESYM
92415+	/* 'P' = pointers approved for copying to userland, as in the
92416+	   /proc/kallsyms case, where we display nothing to non-root
92417+	   users and the real contents to root users.
92418+	   Also ignore 'K' pointers, since we already force them to NULL
92419+	   for non-root users above.
92420+	 */
92421+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
92422+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
92423+ dump_stack();
92424+ ptr = NULL;
92425+ }
92426+#endif
92427+
92428 spec.flags |= SMALL;
92429 if (spec.field_width == -1) {
92430 spec.field_width = default_width;
92431@@ -2107,11 +2138,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
92432 typeof(type) value; \
92433 if (sizeof(type) == 8) { \
92434 args = PTR_ALIGN(args, sizeof(u32)); \
92435- *(u32 *)&value = *(u32 *)args; \
92436- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
92437+ *(u32 *)&value = *(const u32 *)args; \
92438+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
92439 } else { \
92440 args = PTR_ALIGN(args, sizeof(type)); \
92441- value = *(typeof(type) *)args; \
92442+ value = *(const typeof(type) *)args; \
92443 } \
92444 args += sizeof(type); \
92445 value; \
92446@@ -2174,7 +2205,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
92447 case FORMAT_TYPE_STR: {
92448 const char *str_arg = args;
92449 args += strlen(str_arg) + 1;
92450- str = string(str, end, (char *)str_arg, spec);
92451+ str = string(str, end, str_arg, spec);
92452 break;
92453 }
92454
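
The kptr_restrict default of 2 set above makes %pK print a null pointer for every reader, root included; mainline defaults to 0, which prints real addresses, and 1 gates them on CAP_SYSLOG. A sketch of the policy the format code applies, with semantics as documented for sysctl kernel.kptr_restrict and an illustrative helper name:

#include <stdio.h>

/* 0: print the real address
 * 1: print zeros unless the reader has CAP_SYSLOG
 * 2: always print zeros (the GRKERNSEC_HIDESYM default above) */
static int kptr_restrict = 2;

static void print_pK(const void *p, int has_cap_syslog)
{
        if (kptr_restrict == 0 ||
            (kptr_restrict == 1 && has_cap_syslog))
                printf("%p\n", p);
        else
                printf("%p\n", (void *)0);
}

int main(void)
{
        int x;

        print_pK(&x, 0);   /* prints a null pointer under restrict=2 */
        return 0;
}
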
92455diff --git a/localversion-grsec b/localversion-grsec
92456new file mode 100644
92457index 0000000..7cd6065
92458--- /dev/null
92459+++ b/localversion-grsec
92460@@ -0,0 +1 @@
92461+-grsec
92462diff --git a/mm/Kconfig b/mm/Kconfig
92463index 2888024..c15a810 100644
92464--- a/mm/Kconfig
92465+++ b/mm/Kconfig
92466@@ -326,10 +326,11 @@ config KSM
92467 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
92468
92469 config DEFAULT_MMAP_MIN_ADDR
92470- int "Low address space to protect from user allocation"
92471+ int "Low address space to protect from user allocation"
92472 depends on MMU
92473- default 4096
92474- help
92475+ default 32768 if ALPHA || ARM || PARISC || SPARC32
92476+ default 65536
92477+ help
92478 This is the portion of low virtual memory which should be protected
92479 from userspace allocation. Keeping a user from writing to low pages
92480 can help reduce the impact of kernel NULL pointer bugs.
92481@@ -360,7 +361,7 @@ config MEMORY_FAILURE
92482
92483 config HWPOISON_INJECT
92484 tristate "HWPoison pages injector"
92485- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
92486+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
92487 select PROC_PAGE_MONITOR
92488
92489 config NOMMU_INITIAL_TRIM_EXCESS
92490diff --git a/mm/backing-dev.c b/mm/backing-dev.c
92491index 09d9591..165bb75 100644
92492--- a/mm/backing-dev.c
92493+++ b/mm/backing-dev.c
92494@@ -12,7 +12,7 @@
92495 #include <linux/device.h>
92496 #include <trace/events/writeback.h>
92497
92498-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
92499+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
92500
92501 struct backing_dev_info default_backing_dev_info = {
92502 .name = "default",
92503@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
92504 return err;
92505
92506 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
92507- atomic_long_inc_return(&bdi_seq));
92508+ atomic_long_inc_return_unchecked(&bdi_seq));
92509 if (err) {
92510 bdi_destroy(bdi);
92511 return err;
92512diff --git a/mm/filemap.c b/mm/filemap.c
92513index 7a13f6a..e31738b 100644
92514--- a/mm/filemap.c
92515+++ b/mm/filemap.c
92516@@ -192,9 +192,11 @@ static int filemap_check_errors(struct address_space *mapping)
92517 {
92518 int ret = 0;
92519 /* Check for outstanding write errors */
92520- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
92521+ if (test_bit(AS_ENOSPC, &mapping->flags) &&
92522+ test_and_clear_bit(AS_ENOSPC, &mapping->flags))
92523 ret = -ENOSPC;
92524- if (test_and_clear_bit(AS_EIO, &mapping->flags))
92525+ if (test_bit(AS_EIO, &mapping->flags) &&
92526+ test_and_clear_bit(AS_EIO, &mapping->flags))
92527 ret = -EIO;
92528 return ret;
92529 }
92530@@ -1766,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
92531 struct address_space *mapping = file->f_mapping;
92532
92533 if (!mapping->a_ops->readpage)
92534- return -ENOEXEC;
92535+ return -ENODEV;
92536 file_accessed(file);
92537 vma->vm_ops = &generic_file_vm_ops;
92538 return 0;
92539@@ -1948,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
92540
92541 while (bytes) {
92542 char __user *buf = iov->iov_base + base;
92543- int copy = min(bytes, iov->iov_len - base);
92544+ size_t copy = min(bytes, iov->iov_len - base);
92545
92546 base = 0;
92547 left = __copy_from_user_inatomic(vaddr, buf, copy);
92548@@ -1977,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
92549 BUG_ON(!in_atomic());
92550 kaddr = kmap_atomic(page);
92551 if (likely(i->nr_segs == 1)) {
92552- int left;
92553+ size_t left;
92554 char __user *buf = i->iov->iov_base + i->iov_offset;
92555 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
92556 copied = bytes - left;
92557@@ -2005,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
92558
92559 kaddr = kmap(page);
92560 if (likely(i->nr_segs == 1)) {
92561- int left;
92562+ size_t left;
92563 char __user *buf = i->iov->iov_base + i->iov_offset;
92564 left = __copy_from_user(kaddr + offset, buf, bytes);
92565 copied = bytes - left;
92566@@ -2035,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
92567 * zero-length segments (without overruning the iovec).
92568 */
92569 while (bytes || unlikely(i->count && !iov->iov_len)) {
92570- int copy;
92571+ size_t copy;
92572
92573 copy = min(bytes, iov->iov_len - base);
92574 BUG_ON(!i->count || i->count < copy);
92575@@ -2106,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
92576 *pos = i_size_read(inode);
92577
92578 if (limit != RLIM_INFINITY) {
92579+		gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
92580 if (*pos >= limit) {
92581 send_sig(SIGXFSZ, current, 0);
92582 return -EFBIG;
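
The filemap_check_errors() change above is a fast-path optimization: test_and_clear_bit() is a locked read-modify-write that dirties the cacheline even when the bit is clear, so a plain test_bit() runs first and the atomic only executes when the flag is plausibly set; races are still decided by the atomic. A user-space analogue built on GCC atomics, as an analogue only, not the kernel primitives:

#include <stdio.h>

static unsigned long flags;

/* Cheap relaxed read first; only pay for the atomic RMW when the bit
 * looks set. The fetch-and still resolves any race authoritatively. */
static int test_and_clear_bit_fast(unsigned long *addr, int nr)
{
        unsigned long mask = 1UL << nr;

        if (!(__atomic_load_n(addr, __ATOMIC_RELAXED) & mask))
                return 0;
        return (__atomic_fetch_and(addr, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

int main(void)
{
        flags = 1UL << 3;
        printf("%d %d\n",
               test_and_clear_bit_fast(&flags, 3),   /* 1: bit cleared */
               test_and_clear_bit_fast(&flags, 3));  /* 0: cheap path */
        return 0;
}
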
92583diff --git a/mm/fremap.c b/mm/fremap.c
92584index 34feba6..315fe78 100644
92585--- a/mm/fremap.c
92586+++ b/mm/fremap.c
92587@@ -179,6 +179,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
92588 retry:
92589 vma = find_vma(mm, start);
92590
92591+#ifdef CONFIG_PAX_SEGMEXEC
92592+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
92593+ goto out;
92594+#endif
92595+
92596 /*
92597 * Make sure the vma is shared, that it supports prefaulting,
92598 * and that the remapped range is valid and fully within
92599diff --git a/mm/highmem.c b/mm/highmem.c
92600index b32b70c..e512eb0 100644
92601--- a/mm/highmem.c
92602+++ b/mm/highmem.c
92603@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
92604 * So no dangers, even with speculative execution.
92605 */
92606 page = pte_page(pkmap_page_table[i]);
92607+ pax_open_kernel();
92608 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
92609-
92610+ pax_close_kernel();
92611 set_page_address(page, NULL);
92612 need_flush = 1;
92613 }
92614@@ -198,9 +199,11 @@ start:
92615 }
92616 }
92617 vaddr = PKMAP_ADDR(last_pkmap_nr);
92618+
92619+ pax_open_kernel();
92620 set_pte_at(&init_mm, vaddr,
92621 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
92622-
92623+ pax_close_kernel();
92624 pkmap_count[last_pkmap_nr] = 1;
92625 set_page_address(page, (void *)vaddr);
92626
92627diff --git a/mm/hugetlb.c b/mm/hugetlb.c
92628index 06a9bc0..cfbba83 100644
92629--- a/mm/hugetlb.c
92630+++ b/mm/hugetlb.c
92631@@ -2070,15 +2070,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
92632 struct hstate *h = &default_hstate;
92633 unsigned long tmp;
92634 int ret;
92635+ ctl_table_no_const hugetlb_table;
92636
92637 tmp = h->max_huge_pages;
92638
92639 if (write && h->order >= MAX_ORDER)
92640 return -EINVAL;
92641
92642- table->data = &tmp;
92643- table->maxlen = sizeof(unsigned long);
92644- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
92645+ hugetlb_table = *table;
92646+ hugetlb_table.data = &tmp;
92647+ hugetlb_table.maxlen = sizeof(unsigned long);
92648+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
92649 if (ret)
92650 goto out;
92651
92652@@ -2123,15 +2125,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
92653 struct hstate *h = &default_hstate;
92654 unsigned long tmp;
92655 int ret;
92656+ ctl_table_no_const hugetlb_table;
92657
92658 tmp = h->nr_overcommit_huge_pages;
92659
92660 if (write && h->order >= MAX_ORDER)
92661 return -EINVAL;
92662
92663- table->data = &tmp;
92664- table->maxlen = sizeof(unsigned long);
92665- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
92666+ hugetlb_table = *table;
92667+ hugetlb_table.data = &tmp;
92668+ hugetlb_table.maxlen = sizeof(unsigned long);
92669+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
92670 if (ret)
92671 goto out;
92672
92673@@ -2600,6 +2604,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
92674 return 1;
92675 }
92676
92677+#ifdef CONFIG_PAX_SEGMEXEC
92678+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
92679+{
92680+ struct mm_struct *mm = vma->vm_mm;
92681+ struct vm_area_struct *vma_m;
92682+ unsigned long address_m;
92683+ pte_t *ptep_m;
92684+
92685+ vma_m = pax_find_mirror_vma(vma);
92686+ if (!vma_m)
92687+ return;
92688+
92689+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
92690+ address_m = address + SEGMEXEC_TASK_SIZE;
92691+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
92692+ get_page(page_m);
92693+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
92694+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
92695+}
92696+#endif
92697+
92698 /*
92699 * Hugetlb_cow() should be called with page lock of the original hugepage held.
92700 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
92701@@ -2716,6 +2741,11 @@ retry_avoidcopy:
92702 make_huge_pte(vma, new_page, 1));
92703 page_remove_rmap(old_page);
92704 hugepage_add_new_anon_rmap(new_page, vma, address);
92705+
92706+#ifdef CONFIG_PAX_SEGMEXEC
92707+ pax_mirror_huge_pte(vma, address, new_page);
92708+#endif
92709+
92710 /* Make the old page be freed below */
92711 new_page = old_page;
92712 }
92713@@ -2880,6 +2910,10 @@ retry:
92714 && (vma->vm_flags & VM_SHARED)));
92715 set_huge_pte_at(mm, address, ptep, new_pte);
92716
92717+#ifdef CONFIG_PAX_SEGMEXEC
92718+ pax_mirror_huge_pte(vma, address, page);
92719+#endif
92720+
92721 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
92722 /* Optimization, do the COW without a second fault */
92723 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
92724@@ -2910,6 +2944,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92725 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
92726 struct hstate *h = hstate_vma(vma);
92727
92728+#ifdef CONFIG_PAX_SEGMEXEC
92729+ struct vm_area_struct *vma_m;
92730+#endif
92731+
92732 address &= huge_page_mask(h);
92733
92734 ptep = huge_pte_offset(mm, address);
92735@@ -2923,6 +2961,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92736 VM_FAULT_SET_HINDEX(hstate_index(h));
92737 }
92738
92739+#ifdef CONFIG_PAX_SEGMEXEC
92740+ vma_m = pax_find_mirror_vma(vma);
92741+ if (vma_m) {
92742+ unsigned long address_m;
92743+
92744+ if (vma->vm_start > vma_m->vm_start) {
92745+ address_m = address;
92746+ address -= SEGMEXEC_TASK_SIZE;
92747+ vma = vma_m;
92748+ h = hstate_vma(vma);
92749+ } else
92750+ address_m = address + SEGMEXEC_TASK_SIZE;
92751+
92752+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
92753+ return VM_FAULT_OOM;
92754+ address_m &= HPAGE_MASK;
92755+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
92756+ }
92757+#endif
92758+
92759 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
92760 if (!ptep)
92761 return VM_FAULT_OOM;
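
The two sysctl handlers at the top of this hugetlb diff follow a pattern used throughout the patch: the global ctl_table becomes read-only under constify, so the handler takes a stack copy, retargets its .data at a local scratch variable, and hands the copy to proc_doulongvec_minmax(). A minimal sketch of the shape; ctl_table_no_const is a grsecurity typedef (assumed here to be a non-const alias), and the struct and helper below are simplified stand-ins:

#include <stdio.h>
#include <stddef.h>

struct ctl_table { const char *procname; void *data; size_t maxlen; };
typedef struct ctl_table ctl_table_no_const;  /* assumed: writable alias */

static int do_read(const struct ctl_table *t)
{
        printf("%s: maxlen=%zu\n", t->procname, t->maxlen);
        return 0;
}

static int handler(const struct ctl_table *table)
{
        unsigned long tmp = 42;
        ctl_table_no_const copy = *table;  /* the global stays untouched */

        copy.data = &tmp;
        copy.maxlen = sizeof(tmp);
        return do_read(&copy);
}

int main(void)
{
        static const struct ctl_table t = { "nr_hugepages", NULL, 0 };

        return handler(&t);
}
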
92762diff --git a/mm/internal.h b/mm/internal.h
92763index 3e91000..4741a60 100644
92764--- a/mm/internal.h
92765+++ b/mm/internal.h
92766@@ -94,6 +94,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
92767 * in mm/page_alloc.c
92768 */
92769 extern void __free_pages_bootmem(struct page *page, unsigned int order);
92770+extern void free_compound_page(struct page *page);
92771 extern void prep_compound_page(struct page *page, unsigned long order);
92772 #ifdef CONFIG_MEMORY_FAILURE
92773 extern bool is_free_buddy_page(struct page *page);
92774@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
92775
92776 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
92777 unsigned long, unsigned long,
92778- unsigned long, unsigned long);
92779+ unsigned long, unsigned long) __intentional_overflow(-1);
92780
92781 extern void set_pageblock_order(void);
92782 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
92783diff --git a/mm/kmemleak.c b/mm/kmemleak.c
92784index 31f01c5..7015178 100644
92785--- a/mm/kmemleak.c
92786+++ b/mm/kmemleak.c
92787@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
92788
92789 for (i = 0; i < object->trace_len; i++) {
92790 void *ptr = (void *)object->trace[i];
92791- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
92792+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
92793 }
92794 }
92795
92796@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
92797 return -ENOMEM;
92798 }
92799
92800- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
92801+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
92802 &kmemleak_fops);
92803 if (!dentry)
92804 pr_warning("Failed to create the debugfs kmemleak file\n");
92805diff --git a/mm/maccess.c b/mm/maccess.c
92806index d53adf9..03a24bf 100644
92807--- a/mm/maccess.c
92808+++ b/mm/maccess.c
92809@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
92810 set_fs(KERNEL_DS);
92811 pagefault_disable();
92812 ret = __copy_from_user_inatomic(dst,
92813- (__force const void __user *)src, size);
92814+ (const void __force_user *)src, size);
92815 pagefault_enable();
92816 set_fs(old_fs);
92817
92818@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
92819
92820 set_fs(KERNEL_DS);
92821 pagefault_disable();
92822- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
92823+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
92824 pagefault_enable();
92825 set_fs(old_fs);
92826
92827diff --git a/mm/madvise.c b/mm/madvise.c
92828index 539eeb9..e24a987 100644
92829--- a/mm/madvise.c
92830+++ b/mm/madvise.c
92831@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
92832 pgoff_t pgoff;
92833 unsigned long new_flags = vma->vm_flags;
92834
92835+#ifdef CONFIG_PAX_SEGMEXEC
92836+ struct vm_area_struct *vma_m;
92837+#endif
92838+
92839 switch (behavior) {
92840 case MADV_NORMAL:
92841 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
92842@@ -126,6 +130,13 @@ success:
92843 /*
92844 * vm_flags is protected by the mmap_sem held in write mode.
92845 */
92846+
92847+#ifdef CONFIG_PAX_SEGMEXEC
92848+ vma_m = pax_find_mirror_vma(vma);
92849+ if (vma_m)
92850+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
92851+#endif
92852+
92853 vma->vm_flags = new_flags;
92854
92855 out:
92856@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
92857 struct vm_area_struct **prev,
92858 unsigned long start, unsigned long end)
92859 {
92860+
92861+#ifdef CONFIG_PAX_SEGMEXEC
92862+ struct vm_area_struct *vma_m;
92863+#endif
92864+
92865 *prev = vma;
92866 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
92867 return -EINVAL;
92868@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
92869 zap_page_range(vma, start, end - start, &details);
92870 } else
92871 zap_page_range(vma, start, end - start, NULL);
92872+
92873+#ifdef CONFIG_PAX_SEGMEXEC
92874+ vma_m = pax_find_mirror_vma(vma);
92875+ if (vma_m) {
92876+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
92877+ struct zap_details details = {
92878+ .nonlinear_vma = vma_m,
92879+ .last_index = ULONG_MAX,
92880+ };
92881+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
92882+ } else
92883+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
92884+ }
92885+#endif
92886+
92887 return 0;
92888 }
92889
92890@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
92891 if (end < start)
92892 return error;
92893
92894+#ifdef CONFIG_PAX_SEGMEXEC
92895+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
92896+ if (end > SEGMEXEC_TASK_SIZE)
92897+ return error;
92898+ } else
92899+#endif
92900+
92901+ if (end > TASK_SIZE)
92902+ return error;
92903+
92904 error = 0;
92905 if (end == start)
92906 return error;
92907diff --git a/mm/memory-failure.c b/mm/memory-failure.c
92908index 66586bb..73ab487 100644
92909--- a/mm/memory-failure.c
92910+++ b/mm/memory-failure.c
92911@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
92912
92913 int sysctl_memory_failure_recovery __read_mostly = 1;
92914
92915-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
92916+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
92917
92918 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
92919
92920@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
92921 pfn, t->comm, t->pid);
92922 si.si_signo = SIGBUS;
92923 si.si_errno = 0;
92924- si.si_addr = (void *)addr;
92925+ si.si_addr = (void __user *)addr;
92926 #ifdef __ARCH_SI_TRAPNO
92927 si.si_trapno = trapno;
92928 #endif
92929@@ -762,7 +762,7 @@ static struct page_state {
92930 unsigned long res;
92931 char *msg;
92932 int (*action)(struct page *p, unsigned long pfn);
92933-} error_states[] = {
92934+} __do_const error_states[] = {
92935 { reserved, reserved, "reserved kernel", me_kernel },
92936 /*
92937 * free pages are specially detected outside this table:
92938@@ -1062,7 +1062,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
92939 nr_pages = 1 << compound_order(hpage);
92940 else /* normal page or thp */
92941 nr_pages = 1;
92942- atomic_long_add(nr_pages, &num_poisoned_pages);
92943+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
92944
92945 /*
92946 * We need/can do nothing about count=0 pages.
92947@@ -1091,7 +1091,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
92948 if (PageHWPoison(hpage)) {
92949 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
92950 || (p != hpage && TestSetPageHWPoison(hpage))) {
92951- atomic_long_sub(nr_pages, &num_poisoned_pages);
92952+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
92953 unlock_page(hpage);
92954 return 0;
92955 }
92956@@ -1162,7 +1162,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
92957 }
92958 if (hwpoison_filter(p)) {
92959 if (TestClearPageHWPoison(p))
92960- atomic_long_sub(nr_pages, &num_poisoned_pages);
92961+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
92962 unlock_page(hpage);
92963 put_page(hpage);
92964 return 0;
92965@@ -1384,7 +1384,7 @@ int unpoison_memory(unsigned long pfn)
92966 return 0;
92967 }
92968 if (TestClearPageHWPoison(p))
92969- atomic_long_dec(&num_poisoned_pages);
92970+ atomic_long_dec_unchecked(&num_poisoned_pages);
92971 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
92972 return 0;
92973 }
92974@@ -1398,7 +1398,7 @@ int unpoison_memory(unsigned long pfn)
92975 */
92976 if (TestClearPageHWPoison(page)) {
92977 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
92978- atomic_long_sub(nr_pages, &num_poisoned_pages);
92979+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
92980 freeit = 1;
92981 if (PageHuge(page))
92982 clear_page_hwpoison_huge_page(page);
92983@@ -1523,11 +1523,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
92984 if (PageHuge(page)) {
92985 set_page_hwpoison_huge_page(hpage);
92986 dequeue_hwpoisoned_huge_page(hpage);
92987- atomic_long_add(1 << compound_order(hpage),
92988+ atomic_long_add_unchecked(1 << compound_order(hpage),
92989 &num_poisoned_pages);
92990 } else {
92991 SetPageHWPoison(page);
92992- atomic_long_inc(&num_poisoned_pages);
92993+ atomic_long_inc_unchecked(&num_poisoned_pages);
92994 }
92995 }
92996 return ret;
92997@@ -1566,7 +1566,7 @@ static int __soft_offline_page(struct page *page, int flags)
92998 put_page(page);
92999 pr_info("soft_offline: %#lx: invalidated\n", pfn);
93000 SetPageHWPoison(page);
93001- atomic_long_inc(&num_poisoned_pages);
93002+ atomic_long_inc_unchecked(&num_poisoned_pages);
93003 return 0;
93004 }
93005
93006@@ -1617,7 +1617,7 @@ static int __soft_offline_page(struct page *page, int flags)
93007 if (!is_free_buddy_page(page))
93008 pr_info("soft offline: %#lx: page leaked\n",
93009 pfn);
93010- atomic_long_inc(&num_poisoned_pages);
93011+ atomic_long_inc_unchecked(&num_poisoned_pages);
93012 }
93013 } else {
93014 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
93015@@ -1691,11 +1691,11 @@ int soft_offline_page(struct page *page, int flags)
93016 if (PageHuge(page)) {
93017 set_page_hwpoison_huge_page(hpage);
93018 dequeue_hwpoisoned_huge_page(hpage);
93019- atomic_long_add(1 << compound_order(hpage),
93020+ atomic_long_add_unchecked(1 << compound_order(hpage),
93021 &num_poisoned_pages);
93022 } else {
93023 SetPageHWPoison(page);
93024- atomic_long_inc(&num_poisoned_pages);
93025+ atomic_long_inc_unchecked(&num_poisoned_pages);
93026 }
93027 }
93028 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
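
The num_poisoned_pages conversions above are the counter side of PaX REFCOUNT: ordinary atomic ops gain overflow detection to stop reference-count overflow exploits, so purely statistical counters that may legitimately wrap must switch to the _unchecked variants. A user-space sketch of the distinction using a GCC overflow builtin; the kernel versions are per-arch assembly, this only mirrors the idea:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { long v; } atomic_long_t;            /* overflow-checked */
typedef struct { long v; } atomic_long_unchecked_t;  /* allowed to wrap */

static void atomic_long_add(long n, atomic_long_t *a)
{
        long r;

        if (__builtin_add_overflow(a->v, n, &r)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();        /* PaX would kill the offending task */
        }
        a->v = r;
}

static void atomic_long_add_unchecked(long n, atomic_long_unchecked_t *a)
{
        /* wrap is deliberate here, so do it in unsigned arithmetic */
        a->v = (long)((unsigned long)a->v + (unsigned long)n);
}

int main(void)
{
        atomic_long_t ref = { LONG_MAX };
        atomic_long_unchecked_t stat = { LONG_MAX };

        atomic_long_add_unchecked(1, &stat);  /* statistics: fine */
        printf("stat wrapped to %ld\n", stat.v);
        atomic_long_add(1, &ref);             /* refcount: aborts */
        return 0;
}
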
93029diff --git a/mm/memory.c b/mm/memory.c
93030index 49e930f..90d7ec5 100644
93031--- a/mm/memory.c
93032+++ b/mm/memory.c
93033@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
93034 free_pte_range(tlb, pmd, addr);
93035 } while (pmd++, addr = next, addr != end);
93036
93037+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
93038 start &= PUD_MASK;
93039 if (start < floor)
93040 return;
93041@@ -417,6 +418,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
93042 pmd = pmd_offset(pud, start);
93043 pud_clear(pud);
93044 pmd_free_tlb(tlb, pmd, start);
93045+#endif
93046+
93047 }
93048
93049 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
93050@@ -436,6 +439,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
93051 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
93052 } while (pud++, addr = next, addr != end);
93053
93054+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
93055 start &= PGDIR_MASK;
93056 if (start < floor)
93057 return;
93058@@ -450,6 +454,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
93059 pud = pud_offset(pgd, start);
93060 pgd_clear(pgd);
93061 pud_free_tlb(tlb, pud, start);
93062+#endif
93063+
93064 }
93065
93066 /*
93067@@ -1636,12 +1642,6 @@ no_page_table:
93068 return page;
93069 }
93070
93071-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
93072-{
93073- return stack_guard_page_start(vma, addr) ||
93074- stack_guard_page_end(vma, addr+PAGE_SIZE);
93075-}
93076-
93077 /**
93078 * __get_user_pages() - pin user pages in memory
93079 * @tsk: task_struct of target task
93080@@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
93081
93082 i = 0;
93083
93084- do {
93085+ while (nr_pages) {
93086 struct vm_area_struct *vma;
93087
93088- vma = find_extend_vma(mm, start);
93089+ vma = find_vma(mm, start);
93090 if (!vma && in_gate_area(mm, start)) {
93091 unsigned long pg = start & PAGE_MASK;
93092 pgd_t *pgd;
93093@@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
93094 goto next_page;
93095 }
93096
93097- if (!vma ||
93098+ if (!vma || start < vma->vm_start ||
93099 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
93100 !(vm_flags & vma->vm_flags))
93101 return i ? : -EFAULT;
93102@@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
93103 int ret;
93104 unsigned int fault_flags = 0;
93105
93106- /* For mlock, just skip the stack guard page. */
93107- if (foll_flags & FOLL_MLOCK) {
93108- if (stack_guard_page(vma, start))
93109- goto next_page;
93110- }
93111 if (foll_flags & FOLL_WRITE)
93112 fault_flags |= FAULT_FLAG_WRITE;
93113 if (nonblocking)
93114@@ -1893,7 +1888,7 @@ next_page:
93115 start += page_increm * PAGE_SIZE;
93116 nr_pages -= page_increm;
93117 } while (nr_pages && start < vma->vm_end);
93118- } while (nr_pages);
93119+ }
93120 return i;
93121 }
93122 EXPORT_SYMBOL(__get_user_pages);
93123@@ -2105,6 +2100,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
93124 page_add_file_rmap(page);
93125 set_pte_at(mm, addr, pte, mk_pte(page, prot));
93126
93127+#ifdef CONFIG_PAX_SEGMEXEC
93128+ pax_mirror_file_pte(vma, addr, page, ptl);
93129+#endif
93130+
93131 retval = 0;
93132 pte_unmap_unlock(pte, ptl);
93133 return retval;
93134@@ -2149,9 +2148,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
93135 if (!page_count(page))
93136 return -EINVAL;
93137 if (!(vma->vm_flags & VM_MIXEDMAP)) {
93138+
93139+#ifdef CONFIG_PAX_SEGMEXEC
93140+ struct vm_area_struct *vma_m;
93141+#endif
93142+
93143 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
93144 BUG_ON(vma->vm_flags & VM_PFNMAP);
93145 vma->vm_flags |= VM_MIXEDMAP;
93146+
93147+#ifdef CONFIG_PAX_SEGMEXEC
93148+ vma_m = pax_find_mirror_vma(vma);
93149+ if (vma_m)
93150+ vma_m->vm_flags |= VM_MIXEDMAP;
93151+#endif
93152+
93153 }
93154 return insert_page(vma, addr, page, vma->vm_page_prot);
93155 }
93156@@ -2234,6 +2245,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
93157 unsigned long pfn)
93158 {
93159 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
93160+ BUG_ON(vma->vm_mirror);
93161
93162 if (addr < vma->vm_start || addr >= vma->vm_end)
93163 return -EFAULT;
93164@@ -2481,7 +2493,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
93165
93166 BUG_ON(pud_huge(*pud));
93167
93168- pmd = pmd_alloc(mm, pud, addr);
93169+ pmd = (mm == &init_mm) ?
93170+ pmd_alloc_kernel(mm, pud, addr) :
93171+ pmd_alloc(mm, pud, addr);
93172 if (!pmd)
93173 return -ENOMEM;
93174 do {
93175@@ -2501,7 +2515,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
93176 unsigned long next;
93177 int err;
93178
93179- pud = pud_alloc(mm, pgd, addr);
93180+ pud = (mm == &init_mm) ?
93181+ pud_alloc_kernel(mm, pgd, addr) :
93182+ pud_alloc(mm, pgd, addr);
93183 if (!pud)
93184 return -ENOMEM;
93185 do {
93186@@ -2591,6 +2607,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
93187 copy_user_highpage(dst, src, va, vma);
93188 }
93189
93190+#ifdef CONFIG_PAX_SEGMEXEC
93191+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
93192+{
93193+ struct mm_struct *mm = vma->vm_mm;
93194+ spinlock_t *ptl;
93195+ pte_t *pte, entry;
93196+
93197+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
93198+ entry = *pte;
93199+ if (!pte_present(entry)) {
93200+ if (!pte_none(entry)) {
93201+ BUG_ON(pte_file(entry));
93202+ free_swap_and_cache(pte_to_swp_entry(entry));
93203+ pte_clear_not_present_full(mm, address, pte, 0);
93204+ }
93205+ } else {
93206+ struct page *page;
93207+
93208+ flush_cache_page(vma, address, pte_pfn(entry));
93209+ entry = ptep_clear_flush(vma, address, pte);
93210+ BUG_ON(pte_dirty(entry));
93211+ page = vm_normal_page(vma, address, entry);
93212+ if (page) {
93213+ update_hiwater_rss(mm);
93214+ if (PageAnon(page))
93215+ dec_mm_counter_fast(mm, MM_ANONPAGES);
93216+ else
93217+ dec_mm_counter_fast(mm, MM_FILEPAGES);
93218+ page_remove_rmap(page);
93219+ page_cache_release(page);
93220+ }
93221+ }
93222+ pte_unmap_unlock(pte, ptl);
93223+}
93224+
93225+/* PaX: if the vma is mirrored, synchronize the mirror's PTE
93226+ *
93227+ * the ptl of the lower mapped page is held on entry and is released neither
93228+ * on exit nor inside, so that PTE state changes (swapout, mremap, munmap, etc.) stay atomic
93229+ */
93230+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
93231+{
93232+ struct mm_struct *mm = vma->vm_mm;
93233+ unsigned long address_m;
93234+ spinlock_t *ptl_m;
93235+ struct vm_area_struct *vma_m;
93236+ pmd_t *pmd_m;
93237+ pte_t *pte_m, entry_m;
93238+
93239+ BUG_ON(!page_m || !PageAnon(page_m));
93240+
93241+ vma_m = pax_find_mirror_vma(vma);
93242+ if (!vma_m)
93243+ return;
93244+
93245+ BUG_ON(!PageLocked(page_m));
93246+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
93247+ address_m = address + SEGMEXEC_TASK_SIZE;
93248+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
93249+ pte_m = pte_offset_map(pmd_m, address_m);
93250+ ptl_m = pte_lockptr(mm, pmd_m);
93251+ if (ptl != ptl_m) {
93252+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
93253+ if (!pte_none(*pte_m))
93254+ goto out;
93255+ }
93256+
93257+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
93258+ page_cache_get(page_m);
93259+ page_add_anon_rmap(page_m, vma_m, address_m);
93260+ inc_mm_counter_fast(mm, MM_ANONPAGES);
93261+ set_pte_at(mm, address_m, pte_m, entry_m);
93262+ update_mmu_cache(vma_m, address_m, pte_m);
93263+out:
93264+ if (ptl != ptl_m)
93265+ spin_unlock(ptl_m);
93266+ pte_unmap(pte_m);
93267+ unlock_page(page_m);
93268+}
93269+
93270+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
93271+{
93272+ struct mm_struct *mm = vma->vm_mm;
93273+ unsigned long address_m;
93274+ spinlock_t *ptl_m;
93275+ struct vm_area_struct *vma_m;
93276+ pmd_t *pmd_m;
93277+ pte_t *pte_m, entry_m;
93278+
93279+ BUG_ON(!page_m || PageAnon(page_m));
93280+
93281+ vma_m = pax_find_mirror_vma(vma);
93282+ if (!vma_m)
93283+ return;
93284+
93285+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
93286+ address_m = address + SEGMEXEC_TASK_SIZE;
93287+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
93288+ pte_m = pte_offset_map(pmd_m, address_m);
93289+ ptl_m = pte_lockptr(mm, pmd_m);
93290+ if (ptl != ptl_m) {
93291+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
93292+ if (!pte_none(*pte_m))
93293+ goto out;
93294+ }
93295+
93296+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
93297+ page_cache_get(page_m);
93298+ page_add_file_rmap(page_m);
93299+ inc_mm_counter_fast(mm, MM_FILEPAGES);
93300+ set_pte_at(mm, address_m, pte_m, entry_m);
93301+ update_mmu_cache(vma_m, address_m, pte_m);
93302+out:
93303+ if (ptl != ptl_m)
93304+ spin_unlock(ptl_m);
93305+ pte_unmap(pte_m);
93306+}
93307+
93308+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
93309+{
93310+ struct mm_struct *mm = vma->vm_mm;
93311+ unsigned long address_m;
93312+ spinlock_t *ptl_m;
93313+ struct vm_area_struct *vma_m;
93314+ pmd_t *pmd_m;
93315+ pte_t *pte_m, entry_m;
93316+
93317+ vma_m = pax_find_mirror_vma(vma);
93318+ if (!vma_m)
93319+ return;
93320+
93321+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
93322+ address_m = address + SEGMEXEC_TASK_SIZE;
93323+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
93324+ pte_m = pte_offset_map(pmd_m, address_m);
93325+ ptl_m = pte_lockptr(mm, pmd_m);
93326+ if (ptl != ptl_m) {
93327+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
93328+ if (!pte_none(*pte_m))
93329+ goto out;
93330+ }
93331+
93332+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
93333+ set_pte_at(mm, address_m, pte_m, entry_m);
93334+out:
93335+ if (ptl != ptl_m)
93336+ spin_unlock(ptl_m);
93337+ pte_unmap(pte_m);
93338+}
93339+
93340+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
93341+{
93342+ struct page *page_m;
93343+ pte_t entry;
93344+
93345+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
93346+ goto out;
93347+
93348+ entry = *pte;
93349+ page_m = vm_normal_page(vma, address, entry);
93350+ if (!page_m)
93351+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
93352+ else if (PageAnon(page_m)) {
93353+ if (pax_find_mirror_vma(vma)) {
93354+ pte_unmap_unlock(pte, ptl);
93355+ lock_page(page_m);
93356+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
93357+ if (pte_same(entry, *pte))
93358+ pax_mirror_anon_pte(vma, address, page_m, ptl);
93359+ else
93360+ unlock_page(page_m);
93361+ }
93362+ } else
93363+ pax_mirror_file_pte(vma, address, page_m, ptl);
93364+
93365+out:
93366+ pte_unmap_unlock(pte, ptl);
93367+}
93368+#endif
93369+
93370 /*
93371 * This routine handles present pages, when users try to write
93372 * to a shared page. It is done by copying the page to a new address
93373@@ -2815,6 +3011,12 @@ gotten:
93374 */
93375 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
93376 if (likely(pte_same(*page_table, orig_pte))) {
93377+
93378+#ifdef CONFIG_PAX_SEGMEXEC
93379+ if (pax_find_mirror_vma(vma))
93380+ BUG_ON(!trylock_page(new_page));
93381+#endif
93382+
93383 if (old_page) {
93384 if (!PageAnon(old_page)) {
93385 dec_mm_counter_fast(mm, MM_FILEPAGES);
93386@@ -2866,6 +3068,10 @@ gotten:
93387 page_remove_rmap(old_page);
93388 }
93389
93390+#ifdef CONFIG_PAX_SEGMEXEC
93391+ pax_mirror_anon_pte(vma, address, new_page, ptl);
93392+#endif
93393+
93394 /* Free the old page.. */
93395 new_page = old_page;
93396 ret |= VM_FAULT_WRITE;
93397@@ -3143,6 +3349,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
93398 swap_free(entry);
93399 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
93400 try_to_free_swap(page);
93401+
93402+#ifdef CONFIG_PAX_SEGMEXEC
93403+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
93404+#endif
93405+
93406 unlock_page(page);
93407 if (page != swapcache) {
93408 /*
93409@@ -3166,6 +3377,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
93410
93411 /* No need to invalidate - it was non-present before */
93412 update_mmu_cache(vma, address, page_table);
93413+
93414+#ifdef CONFIG_PAX_SEGMEXEC
93415+ pax_mirror_anon_pte(vma, address, page, ptl);
93416+#endif
93417+
93418 unlock:
93419 pte_unmap_unlock(page_table, ptl);
93420 out:
93421@@ -3185,40 +3401,6 @@ out_release:
93422 }
93423
93424 /*
93425- * This is like a special single-page "expand_{down|up}wards()",
93426- * except we must first make sure that 'address{-|+}PAGE_SIZE'
93427- * doesn't hit another vma.
93428- */
93429-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
93430-{
93431- address &= PAGE_MASK;
93432- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
93433- struct vm_area_struct *prev = vma->vm_prev;
93434-
93435- /*
93436- * Is there a mapping abutting this one below?
93437- *
93438- * That's only ok if it's the same stack mapping
93439- * that has gotten split..
93440- */
93441- if (prev && prev->vm_end == address)
93442- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
93443-
93444- expand_downwards(vma, address - PAGE_SIZE);
93445- }
93446- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
93447- struct vm_area_struct *next = vma->vm_next;
93448-
93449- /* As VM_GROWSDOWN but s/below/above/ */
93450- if (next && next->vm_start == address + PAGE_SIZE)
93451- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
93452-
93453- expand_upwards(vma, address + PAGE_SIZE);
93454- }
93455- return 0;
93456-}
93457-
93458-/*
93459 * We enter with non-exclusive mmap_sem (to exclude vma changes,
93460 * but allow concurrent faults), and pte mapped but not yet locked.
93461 * We return with mmap_sem still held, but pte unmapped and unlocked.
93462@@ -3227,27 +3409,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
93463 unsigned long address, pte_t *page_table, pmd_t *pmd,
93464 unsigned int flags)
93465 {
93466- struct page *page;
93467+ struct page *page = NULL;
93468 spinlock_t *ptl;
93469 pte_t entry;
93470
93471- pte_unmap(page_table);
93472-
93473- /* Check if we need to add a guard page to the stack */
93474- if (check_stack_guard_page(vma, address) < 0)
93475- return VM_FAULT_SIGBUS;
93476-
93477- /* Use the zero-page for reads */
93478 if (!(flags & FAULT_FLAG_WRITE)) {
93479 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
93480 vma->vm_page_prot));
93481- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
93482+ ptl = pte_lockptr(mm, pmd);
93483+ spin_lock(ptl);
93484 if (!pte_none(*page_table))
93485 goto unlock;
93486 goto setpte;
93487 }
93488
93489 /* Allocate our own private page. */
93490+ pte_unmap(page_table);
93491+
93492 if (unlikely(anon_vma_prepare(vma)))
93493 goto oom;
93494 page = alloc_zeroed_user_highpage_movable(vma, address);
93495@@ -3271,6 +3449,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
93496 if (!pte_none(*page_table))
93497 goto release;
93498
93499+#ifdef CONFIG_PAX_SEGMEXEC
93500+ if (pax_find_mirror_vma(vma))
93501+ BUG_ON(!trylock_page(page));
93502+#endif
93503+
93504 inc_mm_counter_fast(mm, MM_ANONPAGES);
93505 page_add_new_anon_rmap(page, vma, address);
93506 setpte:
93507@@ -3278,6 +3461,12 @@ setpte:
93508
93509 /* No need to invalidate - it was non-present before */
93510 update_mmu_cache(vma, address, page_table);
93511+
93512+#ifdef CONFIG_PAX_SEGMEXEC
93513+ if (page)
93514+ pax_mirror_anon_pte(vma, address, page, ptl);
93515+#endif
93516+
93517 unlock:
93518 pte_unmap_unlock(page_table, ptl);
93519 return 0;
93520@@ -3422,6 +3611,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
93521 */
93522 /* Only go through if we didn't race with anybody else... */
93523 if (likely(pte_same(*page_table, orig_pte))) {
93524+
93525+#ifdef CONFIG_PAX_SEGMEXEC
93526+ if (anon && pax_find_mirror_vma(vma))
93527+ BUG_ON(!trylock_page(page));
93528+#endif
93529+
93530 flush_icache_page(vma, page);
93531 entry = mk_pte(page, vma->vm_page_prot);
93532 if (flags & FAULT_FLAG_WRITE)
93533@@ -3443,6 +3638,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
93534
93535 /* no need to invalidate: a not-present page won't be cached */
93536 update_mmu_cache(vma, address, page_table);
93537+
93538+#ifdef CONFIG_PAX_SEGMEXEC
93539+ if (anon)
93540+ pax_mirror_anon_pte(vma, address, page, ptl);
93541+ else
93542+ pax_mirror_file_pte(vma, address, page, ptl);
93543+#endif
93544+
93545 } else {
93546 if (cow_page)
93547 mem_cgroup_uncharge_page(cow_page);
93548@@ -3690,6 +3893,12 @@ static int handle_pte_fault(struct mm_struct *mm,
93549 if (flags & FAULT_FLAG_WRITE)
93550 flush_tlb_fix_spurious_fault(vma, address);
93551 }
93552+
93553+#ifdef CONFIG_PAX_SEGMEXEC
93554+ pax_mirror_pte(vma, address, pte, pmd, ptl);
93555+ return 0;
93556+#endif
93557+
93558 unlock:
93559 pte_unmap_unlock(pte, ptl);
93560 return 0;
93561@@ -3706,9 +3915,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
93562 pmd_t *pmd;
93563 pte_t *pte;
93564
93565+#ifdef CONFIG_PAX_SEGMEXEC
93566+ struct vm_area_struct *vma_m;
93567+#endif
93568+
93569 if (unlikely(is_vm_hugetlb_page(vma)))
93570 return hugetlb_fault(mm, vma, address, flags);
93571
93572+#ifdef CONFIG_PAX_SEGMEXEC
93573+ vma_m = pax_find_mirror_vma(vma);
93574+ if (vma_m) {
93575+ unsigned long address_m;
93576+ pgd_t *pgd_m;
93577+ pud_t *pud_m;
93578+ pmd_t *pmd_m;
93579+
93580+ if (vma->vm_start > vma_m->vm_start) {
93581+ address_m = address;
93582+ address -= SEGMEXEC_TASK_SIZE;
93583+ vma = vma_m;
93584+ } else
93585+ address_m = address + SEGMEXEC_TASK_SIZE;
93586+
93587+ pgd_m = pgd_offset(mm, address_m);
93588+ pud_m = pud_alloc(mm, pgd_m, address_m);
93589+ if (!pud_m)
93590+ return VM_FAULT_OOM;
93591+ pmd_m = pmd_alloc(mm, pud_m, address_m);
93592+ if (!pmd_m)
93593+ return VM_FAULT_OOM;
93594+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
93595+ return VM_FAULT_OOM;
93596+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
93597+ }
93598+#endif
93599+
93600 pgd = pgd_offset(mm, address);
93601 pud = pud_alloc(mm, pgd, address);
93602 if (!pud)
93603@@ -3839,6 +4080,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
93604 spin_unlock(&mm->page_table_lock);
93605 return 0;
93606 }
93607+
93608+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
93609+{
93610+ pud_t *new = pud_alloc_one(mm, address);
93611+ if (!new)
93612+ return -ENOMEM;
93613+
93614+ smp_wmb(); /* See comment in __pte_alloc */
93615+
93616+ spin_lock(&mm->page_table_lock);
93617+ if (pgd_present(*pgd)) /* Another has populated it */
93618+ pud_free(mm, new);
93619+ else
93620+ pgd_populate_kernel(mm, pgd, new);
93621+ spin_unlock(&mm->page_table_lock);
93622+ return 0;
93623+}
93624 #endif /* __PAGETABLE_PUD_FOLDED */
93625
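__pud_alloc_kernel() above follows the usual optimistic page-table allocation
pattern: allocate outside the lock (the allocation may sleep), then either
install the new table under mm->page_table_lock or free it if another thread
won the race; the smp_wmb() publishes the initialized table before it becomes
reachable. A compressed userspace sketch, with a mutex standing in for
page_table_lock (the barrier is elided because the mutex already orders the
stores here):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slot;  /* stands in for the pgd entry */

    static int alloc_slot(void)
    {
        void *new = calloc(1, 64);  /* pud_alloc_one(): done before locking */

        if (!new)
            return -1;              /* -ENOMEM */

        pthread_mutex_lock(&table_lock);
        if (slot)                   /* another thread populated it */
            free(new);
        else
            slot = new;             /* pgd_populate_kernel() */
        pthread_mutex_unlock(&table_lock);
        return 0;
    }

    int main(void)
    {
        alloc_slot();
        alloc_slot();               /* loses the "race", frees its copy */
        printf("slot=%p\n", slot);
        return 0;
    }
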
93626 #ifndef __PAGETABLE_PMD_FOLDED
93627@@ -3869,6 +4127,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
93628 spin_unlock(&mm->page_table_lock);
93629 return 0;
93630 }
93631+
93632+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
93633+{
93634+ pmd_t *new = pmd_alloc_one(mm, address);
93635+ if (!new)
93636+ return -ENOMEM;
93637+
93638+ smp_wmb(); /* See comment in __pte_alloc */
93639+
93640+ spin_lock(&mm->page_table_lock);
93641+#ifndef __ARCH_HAS_4LEVEL_HACK
93642+ if (pud_present(*pud)) /* Another has populated it */
93643+ pmd_free(mm, new);
93644+ else
93645+ pud_populate_kernel(mm, pud, new);
93646+#else
93647+ if (pgd_present(*pud)) /* Another has populated it */
93648+ pmd_free(mm, new);
93649+ else
93650+ pgd_populate_kernel(mm, pud, new);
93651+#endif /* __ARCH_HAS_4LEVEL_HACK */
93652+ spin_unlock(&mm->page_table_lock);
93653+ return 0;
93654+}
93655 #endif /* __PAGETABLE_PMD_FOLDED */
93656
93657 #if !defined(__HAVE_ARCH_GATE_AREA)
93658@@ -3882,7 +4164,7 @@ static int __init gate_vma_init(void)
93659 gate_vma.vm_start = FIXADDR_USER_START;
93660 gate_vma.vm_end = FIXADDR_USER_END;
93661 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
93662- gate_vma.vm_page_prot = __P101;
93663+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
93664
93665 return 0;
93666 }
93667@@ -4016,8 +4298,8 @@ out:
93668 return ret;
93669 }
93670
93671-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
93672- void *buf, int len, int write)
93673+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
93674+ void *buf, size_t len, int write)
93675 {
93676 resource_size_t phys_addr;
93677 unsigned long prot = 0;
93678@@ -4043,8 +4325,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
93679 * Access another process' address space as given in mm. If non-NULL, use the
93680 * given task for page fault accounting.
93681 */
93682-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93683- unsigned long addr, void *buf, int len, int write)
93684+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93685+ unsigned long addr, void *buf, size_t len, int write)
93686 {
93687 struct vm_area_struct *vma;
93688 void *old_buf = buf;
93689@@ -4052,7 +4334,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93690 down_read(&mm->mmap_sem);
93691 /* ignore errors, just check how much was successfully transferred */
93692 while (len) {
93693- int bytes, ret, offset;
93694+ ssize_t bytes, ret, offset;
93695 void *maddr;
93696 struct page *page = NULL;
93697
93698@@ -4111,8 +4393,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93699 *
93700 * The caller must hold a reference on @mm.
93701 */
93702-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93703- void *buf, int len, int write)
93704+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
93705+ void *buf, size_t len, int write)
93706 {
93707 return __access_remote_vm(NULL, mm, addr, buf, len, write);
93708 }
93709@@ -4122,11 +4404,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93710 * Source/target buffer must be kernel space,
93711 * Do not walk the page table directly, use get_user_pages
93712 */
93713-int access_process_vm(struct task_struct *tsk, unsigned long addr,
93714- void *buf, int len, int write)
93715+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
93716+ void *buf, size_t len, int write)
93717 {
93718 struct mm_struct *mm;
93719- int ret;
93720+ ssize_t ret;
93721
93722 mm = get_task_mm(tsk);
93723 if (!mm)
93724diff --git a/mm/mempolicy.c b/mm/mempolicy.c
93725index ae3c8f3..fa4ee8e 100644
93726--- a/mm/mempolicy.c
93727+++ b/mm/mempolicy.c
93728@@ -746,6 +746,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
93729 unsigned long vmstart;
93730 unsigned long vmend;
93731
93732+#ifdef CONFIG_PAX_SEGMEXEC
93733+ struct vm_area_struct *vma_m;
93734+#endif
93735+
93736 vma = find_vma(mm, start);
93737 if (!vma || vma->vm_start > start)
93738 return -EFAULT;
93739@@ -789,6 +793,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
93740 err = vma_replace_policy(vma, new_pol);
93741 if (err)
93742 goto out;
93743+
93744+#ifdef CONFIG_PAX_SEGMEXEC
93745+ vma_m = pax_find_mirror_vma(vma);
93746+ if (vma_m) {
93747+ err = vma_replace_policy(vma_m, new_pol);
93748+ if (err)
93749+ goto out;
93750+ }
93751+#endif
93752+
93753 }
93754
93755 out:
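
The pattern above recurs throughout the SEGMEXEC changes: any attribute update
applied to a vma is immediately replayed on its mirror so the pair can never
diverge. Reduced to its skeleton:

    #include <stdio.h>

    struct vma { int policy; struct vma *mirror; };

    /* Models the vma_replace_policy()/pax_find_mirror_vma() pairing. */
    static void replace_policy(struct vma *v, int pol)
    {
        v->policy = pol;
        if (v->mirror)
            v->mirror->policy = pol;
    }

    int main(void)
    {
        struct vma m = { 0, NULL };
        struct vma v = { 0, &m };

        replace_policy(&v, 7);
        printf("%d %d\n", v.policy, m.policy);  /* 7 7 */
        return 0;
    }
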
93756@@ -1252,6 +1266,17 @@ static long do_mbind(unsigned long start, unsigned long len,
93757
93758 if (end < start)
93759 return -EINVAL;
93760+
93761+#ifdef CONFIG_PAX_SEGMEXEC
93762+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
93763+ if (end > SEGMEXEC_TASK_SIZE)
93764+ return -EINVAL;
93765+ } else
93766+#endif
93767+
93768+ if (end > TASK_SIZE)
93769+ return -EINVAL;
93770+
93771 if (end == start)
93772 return 0;
93773
93774@@ -1480,8 +1505,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
93775 */
93776 tcred = __task_cred(task);
93777 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
93778- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
93779- !capable(CAP_SYS_NICE)) {
93780+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
93781 rcu_read_unlock();
93782 err = -EPERM;
93783 goto out_put;
93784@@ -1512,6 +1536,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
93785 goto out;
93786 }
93787
93788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93789+ if (mm != current->mm &&
93790+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
93791+ mmput(mm);
93792+ err = -EPERM;
93793+ goto out;
93794+ }
93795+#endif
93796+
93797 err = do_migrate_pages(mm, old, new,
93798 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
93799
93800diff --git a/mm/migrate.c b/mm/migrate.c
93801index bed4880..a493f67 100644
93802--- a/mm/migrate.c
93803+++ b/mm/migrate.c
93804@@ -1485,8 +1485,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
93805 */
93806 tcred = __task_cred(task);
93807 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
93808- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
93809- !capable(CAP_SYS_NICE)) {
93810+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
93811 rcu_read_unlock();
93812 err = -EPERM;
93813 goto out;
93814diff --git a/mm/mlock.c b/mm/mlock.c
93815index b1eb536..091d154 100644
93816--- a/mm/mlock.c
93817+++ b/mm/mlock.c
93818@@ -14,6 +14,7 @@
93819 #include <linux/pagevec.h>
93820 #include <linux/mempolicy.h>
93821 #include <linux/syscalls.h>
93822+#include <linux/security.h>
93823 #include <linux/sched.h>
93824 #include <linux/export.h>
93825 #include <linux/rmap.h>
93826@@ -606,7 +607,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
93827 {
93828 unsigned long nstart, end, tmp;
93829 struct vm_area_struct * vma, * prev;
93830- int error;
93831+ int error = 0;
93832
93833 VM_BUG_ON(start & ~PAGE_MASK);
93834 VM_BUG_ON(len != PAGE_ALIGN(len));
93835@@ -615,6 +616,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
93836 return -EINVAL;
93837 if (end == start)
93838 return 0;
93839+ if (end > TASK_SIZE)
93840+ return -EINVAL;
93841+
93842 vma = find_vma(current->mm, start);
93843 if (!vma || vma->vm_start > start)
93844 return -ENOMEM;
93845@@ -626,6 +630,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
93846 for (nstart = start ; ; ) {
93847 vm_flags_t newflags;
93848
93849+#ifdef CONFIG_PAX_SEGMEXEC
93850+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
93851+ break;
93852+#endif
93853+
93854 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
93855
93856 newflags = vma->vm_flags & ~VM_LOCKED;
93857@@ -739,6 +748,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
93858 locked += current->mm->locked_vm;
93859
93860 /* check against resource limits */
93861+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
93862 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
93863 error = do_mlock(start, len, 1);
93864
93865@@ -776,6 +786,11 @@ static int do_mlockall(int flags)
93866 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
93867 vm_flags_t newflags;
93868
93869+#ifdef CONFIG_PAX_SEGMEXEC
93870+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
93871+ break;
93872+#endif
93873+
93874 newflags = vma->vm_flags & ~VM_LOCKED;
93875 if (flags & MCL_CURRENT)
93876 newflags |= VM_LOCKED;
93877@@ -807,8 +822,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
93878 lock_limit >>= PAGE_SHIFT;
93879
93880 ret = -ENOMEM;
93881+
93882+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
93883+
93884 down_write(&current->mm->mmap_sem);
93885-
93886 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
93887 capable(CAP_IPC_LOCK))
93888 ret = do_mlockall(flags);
93889diff --git a/mm/mmap.c b/mm/mmap.c
93890index 20ff0c3..a9eda98 100644
93891--- a/mm/mmap.c
93892+++ b/mm/mmap.c
93893@@ -36,6 +36,7 @@
93894 #include <linux/sched/sysctl.h>
93895 #include <linux/notifier.h>
93896 #include <linux/memory.h>
93897+#include <linux/random.h>
93898
93899 #include <asm/uaccess.h>
93900 #include <asm/cacheflush.h>
93901@@ -52,6 +53,16 @@
93902 #define arch_rebalance_pgtables(addr, len) (addr)
93903 #endif
93904
93905+static inline void verify_mm_writelocked(struct mm_struct *mm)
93906+{
93907+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
93908+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
93909+ up_read(&mm->mmap_sem);
93910+ BUG();
93911+ }
93912+#endif
93913+}
93914+
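verify_mm_writelocked() asserts write ownership indirectly: if a read-trylock
on mmap_sem succeeds, no writer can be holding the semaphore, so the caller
has violated the locking contract and the kernel BUG()s. The same trick with
a POSIX rwlock (a userspace model, not the kernel API):

    #include <pthread.h>
    #include <assert.h>

    static void verify_writelocked(pthread_rwlock_t *sem)
    {
        /* A successful read-trylock proves no writer holds the lock,
         * i.e. the caller forgot to take the write side. */
        if (pthread_rwlock_tryrdlock(sem) == 0) {
            pthread_rwlock_unlock(sem);
            assert(!"caller must hold the write lock");
        }
    }

    int main(void)
    {
        pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

        pthread_rwlock_wrlock(&sem);
        verify_writelocked(&sem);   /* passes: the trylock fails */
        pthread_rwlock_unlock(&sem);
        return 0;
    }
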
93915 static void unmap_region(struct mm_struct *mm,
93916 struct vm_area_struct *vma, struct vm_area_struct *prev,
93917 unsigned long start, unsigned long end);
93918@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
93919 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
93920 *
93921 */
93922-pgprot_t protection_map[16] = {
93923+pgprot_t protection_map[16] __read_only = {
93924 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
93925 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
93926 };
93927
93928-pgprot_t vm_get_page_prot(unsigned long vm_flags)
93929+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
93930 {
93931- return __pgprot(pgprot_val(protection_map[vm_flags &
93932+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
93933 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
93934 pgprot_val(arch_vm_get_page_prot(vm_flags)));
93935+
93936+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
93937+ if (!(__supported_pte_mask & _PAGE_NX) &&
93938+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
93939+ (vm_flags & (VM_READ | VM_WRITE)))
93940+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
93941+#endif
93942+
93943+ return prot;
93944 }
93945 EXPORT_SYMBOL(vm_get_page_prot);
93946
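protection_map, made __read_only above, is indexed by the low four vm_flags
bits, private entries first and shared entries second; vm_get_page_prot()
masks, looks up, and lets the architecture and the PAGEEXEC emulation adjust
the result. A sketch of the indexing, assuming the standard Linux values for
the four permission bits:

    #include <stdio.h>

    #define VM_READ   0x1UL
    #define VM_WRITE  0x2UL
    #define VM_EXEC   0x4UL
    #define VM_SHARED 0x8UL

    /* Mirrors the protection_map[] layout: __P### for private
     * mappings, __S### for shared ones. */
    static const char *protection_map[16] = {
        "__P000", "__P001", "__P010", "__P011",
        "__P100", "__P101", "__P110", "__P111",
        "__S000", "__S001", "__S010", "__S011",
        "__S100", "__S101", "__S110", "__S111",
    };

    int main(void)
    {
        unsigned long vm_flags = VM_READ | VM_EXEC;     /* private r-x */
        unsigned long idx = vm_flags &
                            (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED);

        printf("%s\n", protection_map[idx]);            /* __P101 */
        return 0;
    }
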
93947@@ -90,6 +110,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
93948 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
93949 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
93950 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
93951+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
93952 /*
93953 * Make sure vm_committed_as in one cacheline and not cacheline shared with
93954 * other variables. It can be updated by several CPUs frequently.
93955@@ -246,6 +267,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
93956 struct vm_area_struct *next = vma->vm_next;
93957
93958 might_sleep();
93959+ BUG_ON(vma->vm_mirror);
93960 if (vma->vm_ops && vma->vm_ops->close)
93961 vma->vm_ops->close(vma);
93962 if (vma->vm_file)
93963@@ -290,6 +312,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
93964 * not page aligned -Ram Gupta
93965 */
93966 rlim = rlimit(RLIMIT_DATA);
93967+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
93968 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
93969 (mm->end_data - mm->start_data) > rlim)
93970 goto out;
93971@@ -940,6 +963,12 @@ static int
93972 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
93973 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
93974 {
93975+
93976+#ifdef CONFIG_PAX_SEGMEXEC
93977+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
93978+ return 0;
93979+#endif
93980+
93981 if (is_mergeable_vma(vma, file, vm_flags) &&
93982 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
93983 if (vma->vm_pgoff == vm_pgoff)
93984@@ -959,6 +988,12 @@ static int
93985 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
93986 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
93987 {
93988+
93989+#ifdef CONFIG_PAX_SEGMEXEC
93990+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
93991+ return 0;
93992+#endif
93993+
93994 if (is_mergeable_vma(vma, file, vm_flags) &&
93995 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
93996 pgoff_t vm_pglen;
93997@@ -1001,13 +1036,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
93998 struct vm_area_struct *vma_merge(struct mm_struct *mm,
93999 struct vm_area_struct *prev, unsigned long addr,
94000 unsigned long end, unsigned long vm_flags,
94001- struct anon_vma *anon_vma, struct file *file,
94002+ struct anon_vma *anon_vma, struct file *file,
94003 pgoff_t pgoff, struct mempolicy *policy)
94004 {
94005 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
94006 struct vm_area_struct *area, *next;
94007 int err;
94008
94009+#ifdef CONFIG_PAX_SEGMEXEC
94010+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
94011+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
94012+
94013+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
94014+#endif
94015+
94016 /*
94017 * We later require that vma->vm_flags == vm_flags,
94018 * so this tests vma->vm_flags & VM_SPECIAL, too.
94019@@ -1023,6 +1065,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
94020 if (next && next->vm_end == end) /* cases 6, 7, 8 */
94021 next = next->vm_next;
94022
94023+#ifdef CONFIG_PAX_SEGMEXEC
94024+ if (prev)
94025+ prev_m = pax_find_mirror_vma(prev);
94026+ if (area)
94027+ area_m = pax_find_mirror_vma(area);
94028+ if (next)
94029+ next_m = pax_find_mirror_vma(next);
94030+#endif
94031+
94032 /*
94033 * Can it merge with the predecessor?
94034 */
94035@@ -1042,9 +1093,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
94036 /* cases 1, 6 */
94037 err = vma_adjust(prev, prev->vm_start,
94038 next->vm_end, prev->vm_pgoff, NULL);
94039- } else /* cases 2, 5, 7 */
94040+
94041+#ifdef CONFIG_PAX_SEGMEXEC
94042+ if (!err && prev_m)
94043+ err = vma_adjust(prev_m, prev_m->vm_start,
94044+ next_m->vm_end, prev_m->vm_pgoff, NULL);
94045+#endif
94046+
94047+ } else { /* cases 2, 5, 7 */
94048 err = vma_adjust(prev, prev->vm_start,
94049 end, prev->vm_pgoff, NULL);
94050+
94051+#ifdef CONFIG_PAX_SEGMEXEC
94052+ if (!err && prev_m)
94053+ err = vma_adjust(prev_m, prev_m->vm_start,
94054+ end_m, prev_m->vm_pgoff, NULL);
94055+#endif
94056+
94057+ }
94058 if (err)
94059 return NULL;
94060 khugepaged_enter_vma_merge(prev);
94061@@ -1058,12 +1124,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
94062 mpol_equal(policy, vma_policy(next)) &&
94063 can_vma_merge_before(next, vm_flags,
94064 anon_vma, file, pgoff+pglen)) {
94065- if (prev && addr < prev->vm_end) /* case 4 */
94066+ if (prev && addr < prev->vm_end) { /* case 4 */
94067 err = vma_adjust(prev, prev->vm_start,
94068 addr, prev->vm_pgoff, NULL);
94069- else /* cases 3, 8 */
94070+
94071+#ifdef CONFIG_PAX_SEGMEXEC
94072+ if (!err && prev_m)
94073+ err = vma_adjust(prev_m, prev_m->vm_start,
94074+ addr_m, prev_m->vm_pgoff, NULL);
94075+#endif
94076+
94077+ } else { /* cases 3, 8 */
94078 err = vma_adjust(area, addr, next->vm_end,
94079 next->vm_pgoff - pglen, NULL);
94080+
94081+#ifdef CONFIG_PAX_SEGMEXEC
94082+ if (!err && area_m)
94083+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
94084+ next_m->vm_pgoff - pglen, NULL);
94085+#endif
94086+
94087+ }
94088 if (err)
94089 return NULL;
94090 khugepaged_enter_vma_merge(area);
94091@@ -1172,8 +1253,10 @@ none:
94092 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
94093 struct file *file, long pages)
94094 {
94095- const unsigned long stack_flags
94096- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
94097+
94098+#ifdef CONFIG_PAX_RANDMMAP
94099+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
94100+#endif
94101
94102 mm->total_vm += pages;
94103
94104@@ -1181,7 +1264,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
94105 mm->shared_vm += pages;
94106 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
94107 mm->exec_vm += pages;
94108- } else if (flags & stack_flags)
94109+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
94110 mm->stack_vm += pages;
94111 }
94112 #endif /* CONFIG_PROC_FS */
94113@@ -1211,6 +1294,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
94114 locked += mm->locked_vm;
94115 lock_limit = rlimit(RLIMIT_MEMLOCK);
94116 lock_limit >>= PAGE_SHIFT;
94117+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
94118 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
94119 return -EAGAIN;
94120 }
94121@@ -1237,7 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
94122 * (the exception is when the underlying filesystem is noexec
94123 * mounted, in which case we dont add PROT_EXEC.)
94124 */
94125- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
94126+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
94127 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
94128 prot |= PROT_EXEC;
94129
94130@@ -1263,7 +1347,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
94131 /* Obtain the address to map to. we verify (or select) it and ensure
94132 * that it represents a valid section of the address space.
94133 */
94134- addr = get_unmapped_area(file, addr, len, pgoff, flags);
94135+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
94136 if (addr & ~PAGE_MASK)
94137 return addr;
94138
94139@@ -1274,6 +1358,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
94140 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
94141 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
94142
94143+#ifdef CONFIG_PAX_MPROTECT
94144+ if (mm->pax_flags & MF_PAX_MPROTECT) {
94145+
94146+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
94147+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
94148+ mm->binfmt->handle_mmap)
94149+ mm->binfmt->handle_mmap(file);
94150+#endif
94151+
94152+#ifndef CONFIG_PAX_MPROTECT_COMPAT
94153+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
94154+ gr_log_rwxmmap(file);
94155+
94156+#ifdef CONFIG_PAX_EMUPLT
94157+ vm_flags &= ~VM_EXEC;
94158+#else
94159+ return -EPERM;
94160+#endif
94161+
94162+ }
94163+
94164+ if (!(vm_flags & VM_EXEC))
94165+ vm_flags &= ~VM_MAYEXEC;
94166+#else
94167+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
94168+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
94169+#endif
94170+ else
94171+ vm_flags &= ~VM_MAYWRITE;
94172+ }
94173+#endif
94174+
94175+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
94176+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
94177+ vm_flags &= ~VM_PAGEEXEC;
94178+#endif
94179+
94180 if (flags & MAP_LOCKED)
94181 if (!can_do_mlock())
94182 return -EPERM;
94183@@ -1361,6 +1482,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
94184 vm_flags |= VM_NORESERVE;
94185 }
94186
94187+ if (!gr_acl_handle_mmap(file, prot))
94188+ return -EACCES;
94189+
94190 addr = mmap_region(file, addr, len, vm_flags, pgoff);
94191 if (!IS_ERR_VALUE(addr) &&
94192 ((vm_flags & VM_LOCKED) ||
94193@@ -1454,7 +1578,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
94194 vm_flags_t vm_flags = vma->vm_flags;
94195
94196 /* If it was private or non-writable, the write bit is already clear */
94197- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
94198+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
94199 return 0;
94200
94201 /* The backer wishes to know when pages are first written to? */
94202@@ -1500,7 +1624,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
94203 struct rb_node **rb_link, *rb_parent;
94204 unsigned long charged = 0;
94205
94206+#ifdef CONFIG_PAX_SEGMEXEC
94207+ struct vm_area_struct *vma_m = NULL;
94208+#endif
94209+
94210+ /*
94211+ * mm->mmap_sem is required to protect against another thread
94212+ * changing the mappings in case we sleep.
94213+ */
94214+ verify_mm_writelocked(mm);
94215+
94216 /* Check against address space limit. */
94217+
94218+#ifdef CONFIG_PAX_RANDMMAP
94219+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
94220+#endif
94221+
94222 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
94223 unsigned long nr_pages;
94224
94225@@ -1519,11 +1658,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
94226
94227 /* Clear old maps */
94228 error = -ENOMEM;
94229-munmap_back:
94230 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
94231 if (do_munmap(mm, addr, len))
94232 return -ENOMEM;
94233- goto munmap_back;
94234+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
94235 }
94236
94237 /*
94238@@ -1554,6 +1692,16 @@ munmap_back:
94239 goto unacct_error;
94240 }
94241
94242+#ifdef CONFIG_PAX_SEGMEXEC
94243+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
94244+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
94245+ if (!vma_m) {
94246+ error = -ENOMEM;
94247+ goto free_vma;
94248+ }
94249+ }
94250+#endif
94251+
94252 vma->vm_mm = mm;
94253 vma->vm_start = addr;
94254 vma->vm_end = addr + len;
94255@@ -1573,6 +1721,13 @@ munmap_back:
94256 if (error)
94257 goto unmap_and_free_vma;
94258
94259+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
94260+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
94261+ vma->vm_flags |= VM_PAGEEXEC;
94262+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
94263+ }
94264+#endif
94265+
94266 /* Can addr have changed??
94267 *
94268 * Answer: Yes, several device drivers can do it in their
94269@@ -1606,6 +1761,12 @@ munmap_back:
94270 }
94271
94272 vma_link(mm, vma, prev, rb_link, rb_parent);
94273+
94274+#ifdef CONFIG_PAX_SEGMEXEC
94275+ if (vma_m)
94276+ BUG_ON(pax_mirror_vma(vma_m, vma));
94277+#endif
94278+
94279 /* Once vma denies write, undo our temporary denial count */
94280 if (vm_flags & VM_DENYWRITE)
94281 allow_write_access(file);
94282@@ -1614,6 +1775,7 @@ out:
94283 perf_event_mmap(vma);
94284
94285 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
94286+ track_exec_limit(mm, addr, addr + len, vm_flags);
94287 if (vm_flags & VM_LOCKED) {
94288 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
94289 vma == get_gate_vma(current->mm)))
94290@@ -1646,6 +1808,12 @@ unmap_and_free_vma:
94291 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
94292 charged = 0;
94293 free_vma:
94294+
94295+#ifdef CONFIG_PAX_SEGMEXEC
94296+ if (vma_m)
94297+ kmem_cache_free(vm_area_cachep, vma_m);
94298+#endif
94299+
94300 kmem_cache_free(vm_area_cachep, vma);
94301 unacct_error:
94302 if (charged)
94303@@ -1653,7 +1821,63 @@ unacct_error:
94304 return error;
94305 }
94306
94307-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
94308+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
94309+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
94310+{
94311+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
94312+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
94313+
94314+ return 0;
94315+}
94316+#endif
94317+
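gr_rand_threadstack_offset() draws eight bits of entropy, so the random gap
placed before a MAP_STACK mapping spans 1 to 256 pages; with 4 KiB pages that
is 4 KiB to 1 MiB. The arithmetic, with prandom_u32() replaced by fixed
illustrative draws:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

    static unsigned long offset(unsigned int r)
    {
        return ((r & 0xFF) + 1UL) << PAGE_SHIFT;    /* 1..256 pages */
    }

    int main(void)
    {
        printf("min: %lu KiB\n", offset(0x00) / 1024);  /* 4 KiB */
        printf("max: %lu KiB\n", offset(0xFF) / 1024);  /* 1024 KiB */
        return 0;
    }
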
94318+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
94319+{
94320+ if (!vma) {
94321+#ifdef CONFIG_STACK_GROWSUP
94322+ if (addr > sysctl_heap_stack_gap)
94323+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
94324+ else
94325+ vma = find_vma(current->mm, 0);
94326+ if (vma && (vma->vm_flags & VM_GROWSUP))
94327+ return false;
94328+#endif
94329+ return true;
94330+ }
94331+
94332+ if (addr + len > vma->vm_start)
94333+ return false;
94334+
94335+ if (vma->vm_flags & VM_GROWSDOWN)
94336+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
94337+#ifdef CONFIG_STACK_GROWSUP
94338+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
94339+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
94340+#endif
94341+ else if (offset)
94342+ return offset <= vma->vm_start - addr - len;
94343+
94344+ return true;
94345+}
94346+
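check_heap_stack_gap() accepts a candidate range only if it stays below the
next vma and, when that vma is a downward-growing stack, leaves at least
sysctl_heap_stack_gap bytes of headroom. A simplified userspace rendering of
the two core cases (the STACK_GROWSUP and thread-stack offset branches are
dropped for brevity):

    #include <stdbool.h>
    #include <stdio.h>

    #define HEAP_STACK_GAP (64UL * 1024)    /* the default set above */

    struct vma { unsigned long start; bool grows_down; };

    static bool gap_ok(const struct vma *next,
                       unsigned long addr, unsigned long len)
    {
        if (!next)                      /* nothing above the candidate */
            return true;
        if (addr + len > next->start)   /* plain overlap */
            return false;
        if (next->grows_down)           /* keep room for stack growth */
            return HEAP_STACK_GAP <= next->start - addr - len;
        return true;
    }

    int main(void)
    {
        struct vma stack = { 0x800000UL, true };

        printf("%d\n", gap_ok(&stack, 0x7F0000UL, 0x1000UL)); /* 0: too close */
        printf("%d\n", gap_ok(&stack, 0x700000UL, 0x1000UL)); /* 1: enough gap */
        return 0;
    }
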
94347+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
94348+{
94349+ if (vma->vm_start < len)
94350+ return -ENOMEM;
94351+
94352+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
94353+ if (offset <= vma->vm_start - len)
94354+ return vma->vm_start - len - offset;
94355+ else
94356+ return -ENOMEM;
94357+ }
94358+
94359+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
94360+ return vma->vm_start - len - sysctl_heap_stack_gap;
94361+ return -ENOMEM;
94362+}
94363+
94364+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
94365 {
94366 /*
94367 * We implement the search by looking for an rbtree node that
94368@@ -1701,11 +1925,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
94369 }
94370 }
94371
94372- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
94373+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
94374 check_current:
94375 /* Check if current node has a suitable gap */
94376 if (gap_start > high_limit)
94377 return -ENOMEM;
94378+
94379+ if (gap_end - gap_start > info->threadstack_offset)
94380+ gap_start += info->threadstack_offset;
94381+ else
94382+ gap_start = gap_end;
94383+
94384+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
94385+ if (gap_end - gap_start > sysctl_heap_stack_gap)
94386+ gap_start += sysctl_heap_stack_gap;
94387+ else
94388+ gap_start = gap_end;
94389+ }
94390+ if (vma->vm_flags & VM_GROWSDOWN) {
94391+ if (gap_end - gap_start > sysctl_heap_stack_gap)
94392+ gap_end -= sysctl_heap_stack_gap;
94393+ else
94394+ gap_end = gap_start;
94395+ }
94396 if (gap_end >= low_limit && gap_end - gap_start >= length)
94397 goto found;
94398
94399@@ -1755,7 +1997,7 @@ found:
94400 return gap_start;
94401 }
94402
94403-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
94404+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
94405 {
94406 struct mm_struct *mm = current->mm;
94407 struct vm_area_struct *vma;
94408@@ -1809,6 +2051,24 @@ check_current:
94409 gap_end = vma->vm_start;
94410 if (gap_end < low_limit)
94411 return -ENOMEM;
94412+
94413+ if (gap_end - gap_start > info->threadstack_offset)
94414+ gap_end -= info->threadstack_offset;
94415+ else
94416+ gap_end = gap_start;
94417+
94418+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
94419+ if (gap_end - gap_start > sysctl_heap_stack_gap)
94420+ gap_start += sysctl_heap_stack_gap;
94421+ else
94422+ gap_start = gap_end;
94423+ }
94424+ if (vma->vm_flags & VM_GROWSDOWN) {
94425+ if (gap_end - gap_start > sysctl_heap_stack_gap)
94426+ gap_end -= sysctl_heap_stack_gap;
94427+ else
94428+ gap_end = gap_start;
94429+ }
94430 if (gap_start <= high_limit && gap_end - gap_start >= length)
94431 goto found;
94432
94433@@ -1872,6 +2132,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
94434 struct mm_struct *mm = current->mm;
94435 struct vm_area_struct *vma;
94436 struct vm_unmapped_area_info info;
94437+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
94438
94439 if (len > TASK_SIZE - mmap_min_addr)
94440 return -ENOMEM;
94441@@ -1879,11 +2140,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
94442 if (flags & MAP_FIXED)
94443 return addr;
94444
94445+#ifdef CONFIG_PAX_RANDMMAP
94446+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
94447+#endif
94448+
94449 if (addr) {
94450 addr = PAGE_ALIGN(addr);
94451 vma = find_vma(mm, addr);
94452 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
94453- (!vma || addr + len <= vma->vm_start))
94454+ check_heap_stack_gap(vma, addr, len, offset))
94455 return addr;
94456 }
94457
94458@@ -1892,6 +2157,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
94459 info.low_limit = mm->mmap_base;
94460 info.high_limit = TASK_SIZE;
94461 info.align_mask = 0;
94462+ info.threadstack_offset = offset;
94463 return vm_unmapped_area(&info);
94464 }
94465 #endif
94466@@ -1910,6 +2176,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
94467 struct mm_struct *mm = current->mm;
94468 unsigned long addr = addr0;
94469 struct vm_unmapped_area_info info;
94470+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
94471
94472 /* requested length too big for entire address space */
94473 if (len > TASK_SIZE - mmap_min_addr)
94474@@ -1918,12 +2185,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
94475 if (flags & MAP_FIXED)
94476 return addr;
94477
94478+#ifdef CONFIG_PAX_RANDMMAP
94479+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
94480+#endif
94481+
94482 /* requesting a specific address */
94483 if (addr) {
94484 addr = PAGE_ALIGN(addr);
94485 vma = find_vma(mm, addr);
94486 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
94487- (!vma || addr + len <= vma->vm_start))
94488+ check_heap_stack_gap(vma, addr, len, offset))
94489 return addr;
94490 }
94491
94492@@ -1932,6 +2203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
94493 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
94494 info.high_limit = mm->mmap_base;
94495 info.align_mask = 0;
94496+ info.threadstack_offset = offset;
94497 addr = vm_unmapped_area(&info);
94498
94499 /*
94500@@ -1944,6 +2216,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
94501 VM_BUG_ON(addr != -ENOMEM);
94502 info.flags = 0;
94503 info.low_limit = TASK_UNMAPPED_BASE;
94504+
94505+#ifdef CONFIG_PAX_RANDMMAP
94506+ if (mm->pax_flags & MF_PAX_RANDMMAP)
94507+ info.low_limit += mm->delta_mmap;
94508+#endif
94509+
94510 info.high_limit = TASK_SIZE;
94511 addr = vm_unmapped_area(&info);
94512 }
94513@@ -2045,6 +2323,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
94514 return vma;
94515 }
94516
94517+#ifdef CONFIG_PAX_SEGMEXEC
94518+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
94519+{
94520+ struct vm_area_struct *vma_m;
94521+
94522+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
94523+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
94524+ BUG_ON(vma->vm_mirror);
94525+ return NULL;
94526+ }
94527+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
94528+ vma_m = vma->vm_mirror;
94529+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
94530+ BUG_ON(vma->vm_file != vma_m->vm_file);
94531+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
94532+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
94533+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
94534+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
94535+ return vma_m;
94536+}
94537+#endif
94538+
94539 /*
94540 * Verify that the stack growth is acceptable and
94541 * update accounting. This is shared with both the
94542@@ -2061,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
94543 return -ENOMEM;
94544
94545 /* Stack limit test */
94546+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
94547 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
94548 return -ENOMEM;
94549
94550@@ -2071,6 +2372,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
94551 locked = mm->locked_vm + grow;
94552 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
94553 limit >>= PAGE_SHIFT;
94554+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
94555 if (locked > limit && !capable(CAP_IPC_LOCK))
94556 return -ENOMEM;
94557 }
94558@@ -2100,37 +2402,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
94559 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
94560 * vma is the last one with address > vma->vm_end. Have to extend vma.
94561 */
94562+#ifndef CONFIG_IA64
94563+static
94564+#endif
94565 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
94566 {
94567 int error;
94568+ bool locknext;
94569
94570 if (!(vma->vm_flags & VM_GROWSUP))
94571 return -EFAULT;
94572
94573+ /* Also guard against wrapping around to address 0. */
94574+ if (address < PAGE_ALIGN(address+1))
94575+ address = PAGE_ALIGN(address+1);
94576+ else
94577+ return -ENOMEM;
94578+
94579 /*
94580 * We must make sure the anon_vma is allocated
94581 * so that the anon_vma locking is not a noop.
94582 */
94583 if (unlikely(anon_vma_prepare(vma)))
94584 return -ENOMEM;
94585+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
94586+ if (locknext && anon_vma_prepare(vma->vm_next))
94587+ return -ENOMEM;
94588 vma_lock_anon_vma(vma);
94589+ if (locknext)
94590+ vma_lock_anon_vma(vma->vm_next);
94591
94592 /*
94593 * vma->vm_start/vm_end cannot change under us because the caller
94594 * is required to hold the mmap_sem in read mode. We need the
94595- * anon_vma lock to serialize against concurrent expand_stacks.
94596- * Also guard against wrapping around to address 0.
94597+ * anon_vma locks to serialize against concurrent expand_stacks
94598+ * and expand_upwards.
94599 */
94600- if (address < PAGE_ALIGN(address+4))
94601- address = PAGE_ALIGN(address+4);
94602- else {
94603- vma_unlock_anon_vma(vma);
94604- return -ENOMEM;
94605- }
94606 error = 0;
94607
94608 /* Somebody else might have raced and expanded it already */
94609- if (address > vma->vm_end) {
94610+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
94611+ error = -ENOMEM;
94612+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
94613 unsigned long size, grow;
94614
94615 size = address - vma->vm_start;
94616@@ -2165,6 +2478,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
94617 }
94618 }
94619 }
94620+ if (locknext)
94621+ vma_unlock_anon_vma(vma->vm_next);
94622 vma_unlock_anon_vma(vma);
94623 khugepaged_enter_vma_merge(vma);
94624 validate_mm(vma->vm_mm);
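
The rewritten wrap-around guard in expand_upwards() leans on unsigned
arithmetic: PAGE_ALIGN(address + 1) wraps to 0 when the address sits in the
last page of the address space, so the "address < PAGE_ALIGN(address + 1)"
test fails exactly in the case that must be refused. Demonstrated with the
usual PAGE_ALIGN definition:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long mid = 0x7fff0123UL;
        unsigned long top = ~0UL - 100;     /* inside the last page */

        /* Normal case: the aligned value lies above the address. */
        printf("mid: aligned=%#lx ok=%d\n",
               PAGE_ALIGN(mid + 1), mid < PAGE_ALIGN(mid + 1));

        /* Last page: the round-up wraps to 0, the guard fails, and
         * expand_upwards() returns -ENOMEM instead of wrapping. */
        printf("top: aligned=%#lx ok=%d\n",
               PAGE_ALIGN(top + 1), top < PAGE_ALIGN(top + 1));
        return 0;
    }
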
94625@@ -2179,6 +2494,8 @@ int expand_downwards(struct vm_area_struct *vma,
94626 unsigned long address)
94627 {
94628 int error;
94629+ bool lockprev = false;
94630+ struct vm_area_struct *prev;
94631
94632 /*
94633 * We must make sure the anon_vma is allocated
94634@@ -2192,6 +2509,15 @@ int expand_downwards(struct vm_area_struct *vma,
94635 if (error)
94636 return error;
94637
94638+ prev = vma->vm_prev;
94639+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
94640+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
94641+#endif
94642+ if (lockprev && anon_vma_prepare(prev))
94643+ return -ENOMEM;
94644+ if (lockprev)
94645+ vma_lock_anon_vma(prev);
94646+
94647 vma_lock_anon_vma(vma);
94648
94649 /*
94650@@ -2201,9 +2527,17 @@ int expand_downwards(struct vm_area_struct *vma,
94651 */
94652
94653 /* Somebody else might have raced and expanded it already */
94654- if (address < vma->vm_start) {
94655+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
94656+ error = -ENOMEM;
94657+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
94658 unsigned long size, grow;
94659
94660+#ifdef CONFIG_PAX_SEGMEXEC
94661+ struct vm_area_struct *vma_m;
94662+
94663+ vma_m = pax_find_mirror_vma(vma);
94664+#endif
94665+
94666 size = vma->vm_end - address;
94667 grow = (vma->vm_start - address) >> PAGE_SHIFT;
94668
94669@@ -2228,13 +2562,27 @@ int expand_downwards(struct vm_area_struct *vma,
94670 vma->vm_pgoff -= grow;
94671 anon_vma_interval_tree_post_update_vma(vma);
94672 vma_gap_update(vma);
94673+
94674+#ifdef CONFIG_PAX_SEGMEXEC
94675+ if (vma_m) {
94676+ anon_vma_interval_tree_pre_update_vma(vma_m);
94677+ vma_m->vm_start -= grow << PAGE_SHIFT;
94678+ vma_m->vm_pgoff -= grow;
94679+ anon_vma_interval_tree_post_update_vma(vma_m);
94680+ vma_gap_update(vma_m);
94681+ }
94682+#endif
94683+
94684 spin_unlock(&vma->vm_mm->page_table_lock);
94685
94686+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
94687 perf_event_mmap(vma);
94688 }
94689 }
94690 }
94691 vma_unlock_anon_vma(vma);
94692+ if (lockprev)
94693+ vma_unlock_anon_vma(prev);
94694 khugepaged_enter_vma_merge(vma);
94695 validate_mm(vma->vm_mm);
94696 return error;
94697@@ -2332,6 +2680,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
94698 do {
94699 long nrpages = vma_pages(vma);
94700
94701+#ifdef CONFIG_PAX_SEGMEXEC
94702+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
94703+ vma = remove_vma(vma);
94704+ continue;
94705+ }
94706+#endif
94707+
94708 if (vma->vm_flags & VM_ACCOUNT)
94709 nr_accounted += nrpages;
94710 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
94711@@ -2376,6 +2731,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
94712 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
94713 vma->vm_prev = NULL;
94714 do {
94715+
94716+#ifdef CONFIG_PAX_SEGMEXEC
94717+ if (vma->vm_mirror) {
94718+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
94719+ vma->vm_mirror->vm_mirror = NULL;
94720+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
94721+ vma->vm_mirror = NULL;
94722+ }
94723+#endif
94724+
94725 vma_rb_erase(vma, &mm->mm_rb);
94726 mm->map_count--;
94727 tail_vma = vma;
94728@@ -2401,14 +2766,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
94729 struct vm_area_struct *new;
94730 int err = -ENOMEM;
94731
94732+#ifdef CONFIG_PAX_SEGMEXEC
94733+ struct vm_area_struct *vma_m, *new_m = NULL;
94734+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
94735+#endif
94736+
94737 if (is_vm_hugetlb_page(vma) && (addr &
94738 ~(huge_page_mask(hstate_vma(vma)))))
94739 return -EINVAL;
94740
94741+#ifdef CONFIG_PAX_SEGMEXEC
94742+ vma_m = pax_find_mirror_vma(vma);
94743+#endif
94744+
94745 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
94746 if (!new)
94747 goto out_err;
94748
94749+#ifdef CONFIG_PAX_SEGMEXEC
94750+ if (vma_m) {
94751+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
94752+ if (!new_m) {
94753+ kmem_cache_free(vm_area_cachep, new);
94754+ goto out_err;
94755+ }
94756+ }
94757+#endif
94758+
94759 /* most fields are the same, copy all, and then fixup */
94760 *new = *vma;
94761
94762@@ -2421,6 +2805,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
94763 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
94764 }
94765
94766+#ifdef CONFIG_PAX_SEGMEXEC
94767+ if (vma_m) {
94768+ *new_m = *vma_m;
94769+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
94770+ new_m->vm_mirror = new;
94771+ new->vm_mirror = new_m;
94772+
94773+ if (new_below)
94774+ new_m->vm_end = addr_m;
94775+ else {
94776+ new_m->vm_start = addr_m;
94777+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
94778+ }
94779+ }
94780+#endif
94781+
94782 err = vma_dup_policy(vma, new);
94783 if (err)
94784 goto out_free_vma;
94785@@ -2440,6 +2840,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
94786 else
94787 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
94788
94789+#ifdef CONFIG_PAX_SEGMEXEC
94790+ if (!err && vma_m) {
94791+ struct mempolicy *pol = vma_policy(new);
94792+
94793+ if (anon_vma_clone(new_m, vma_m))
94794+ goto out_free_mpol;
94795+
94796+ mpol_get(pol);
94797+ set_vma_policy(new_m, pol);
94798+
94799+ if (new_m->vm_file)
94800+ get_file(new_m->vm_file);
94801+
94802+ if (new_m->vm_ops && new_m->vm_ops->open)
94803+ new_m->vm_ops->open(new_m);
94804+
94805+ if (new_below)
94806+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
94807+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
94808+ else
94809+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
94810+
94811+ if (err) {
94812+ if (new_m->vm_ops && new_m->vm_ops->close)
94813+ new_m->vm_ops->close(new_m);
94814+ if (new_m->vm_file)
94815+ fput(new_m->vm_file);
94816+ mpol_put(pol);
94817+ }
94818+ }
94819+#endif
94820+
94821 /* Success. */
94822 if (!err)
94823 return 0;
94824@@ -2449,10 +2881,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
94825 new->vm_ops->close(new);
94826 if (new->vm_file)
94827 fput(new->vm_file);
94828- unlink_anon_vmas(new);
94829 out_free_mpol:
94830 mpol_put(vma_policy(new));
94831 out_free_vma:
94832+
94833+#ifdef CONFIG_PAX_SEGMEXEC
94834+ if (new_m) {
94835+ unlink_anon_vmas(new_m);
94836+ kmem_cache_free(vm_area_cachep, new_m);
94837+ }
94838+#endif
94839+
94840+ unlink_anon_vmas(new);
94841 kmem_cache_free(vm_area_cachep, new);
94842 out_err:
94843 return err;
94844@@ -2465,6 +2905,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
94845 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
94846 unsigned long addr, int new_below)
94847 {
94848+
94849+#ifdef CONFIG_PAX_SEGMEXEC
94850+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
94851+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
94852+ if (mm->map_count >= sysctl_max_map_count-1)
94853+ return -ENOMEM;
94854+ } else
94855+#endif
94856+
94857 if (mm->map_count >= sysctl_max_map_count)
94858 return -ENOMEM;
94859
94860@@ -2476,11 +2925,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
94861 * work. This now handles partial unmappings.
94862 * Jeremy Fitzhardinge <jeremy@goop.org>
94863 */
94864+#ifdef CONFIG_PAX_SEGMEXEC
94865 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
94866 {
94867+ int ret = __do_munmap(mm, start, len);
94868+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
94869+ return ret;
94870+
94871+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
94872+}
94873+
94874+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
94875+#else
94876+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
94877+#endif
94878+{
94879 unsigned long end;
94880 struct vm_area_struct *vma, *prev, *last;
94881
94882+ /*
94883+ * mm->mmap_sem is required to protect against another thread
94884+ * changing the mappings in case we sleep.
94885+ */
94886+ verify_mm_writelocked(mm);
94887+
94888 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
94889 return -EINVAL;
94890
94891@@ -2555,6 +3023,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
94892 /* Fix up all other VM information */
94893 remove_vma_list(mm, vma);
94894
94895+ track_exec_limit(mm, start, end, 0UL);
94896+
94897 return 0;
94898 }
94899
94900@@ -2563,6 +3033,13 @@ int vm_munmap(unsigned long start, size_t len)
94901 int ret;
94902 struct mm_struct *mm = current->mm;
94903
94904+
94905+#ifdef CONFIG_PAX_SEGMEXEC
94906+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
94907+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
94908+ return -EINVAL;
94909+#endif
94910+
94911 down_write(&mm->mmap_sem);
94912 ret = do_munmap(mm, start, len);
94913 up_write(&mm->mmap_sem);
94914@@ -2576,16 +3053,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
94915 return vm_munmap(addr, len);
94916 }
94917
94918-static inline void verify_mm_writelocked(struct mm_struct *mm)
94919-{
94920-#ifdef CONFIG_DEBUG_VM
94921- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
94922- WARN_ON(1);
94923- up_read(&mm->mmap_sem);
94924- }
94925-#endif
94926-}
94927-
94928 /*
94929 * this is really a simplified "do_mmap". it only handles
94930 * anonymous maps. eventually we may be able to do some
94931@@ -2599,6 +3066,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
94932 struct rb_node ** rb_link, * rb_parent;
94933 pgoff_t pgoff = addr >> PAGE_SHIFT;
94934 int error;
94935+ unsigned long charged;
94936
94937 len = PAGE_ALIGN(len);
94938 if (!len)
94939@@ -2606,10 +3074,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
94940
94941 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
94942
94943+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
94944+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
94945+ flags &= ~VM_EXEC;
94946+
94947+#ifdef CONFIG_PAX_MPROTECT
94948+ if (mm->pax_flags & MF_PAX_MPROTECT)
94949+ flags &= ~VM_MAYEXEC;
94950+#endif
94951+
94952+ }
94953+#endif
94954+
94955 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
94956 if (error & ~PAGE_MASK)
94957 return error;
94958
94959+ charged = len >> PAGE_SHIFT;
94960+
94961 error = mlock_future_check(mm, mm->def_flags, len);
94962 if (error)
94963 return error;
94964@@ -2623,21 +3105,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
94965 /*
94966 * Clear old maps. this also does some error checking for us
94967 */
94968- munmap_back:
94969 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
94970 if (do_munmap(mm, addr, len))
94971 return -ENOMEM;
94972- goto munmap_back;
94973+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
94974 }
94975
94976 /* Check against address space limits *after* clearing old maps... */
94977- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
94978+ if (!may_expand_vm(mm, charged))
94979 return -ENOMEM;
94980
94981 if (mm->map_count > sysctl_max_map_count)
94982 return -ENOMEM;
94983
94984- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
94985+ if (security_vm_enough_memory_mm(mm, charged))
94986 return -ENOMEM;
94987
94988 /* Can we just expand an old private anonymous mapping? */
94989@@ -2651,7 +3132,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
94990 */
94991 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
94992 if (!vma) {
94993- vm_unacct_memory(len >> PAGE_SHIFT);
94994+ vm_unacct_memory(charged);
94995 return -ENOMEM;
94996 }
94997
94998@@ -2665,10 +3146,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
94999 vma_link(mm, vma, prev, rb_link, rb_parent);
95000 out:
95001 perf_event_mmap(vma);
95002- mm->total_vm += len >> PAGE_SHIFT;
95003+ mm->total_vm += charged;
95004 if (flags & VM_LOCKED)
95005- mm->locked_vm += (len >> PAGE_SHIFT);
95006+ mm->locked_vm += charged;
95007 vma->vm_flags |= VM_SOFTDIRTY;
95008+ track_exec_limit(mm, addr, addr + len, flags);
95009 return addr;
95010 }
95011
95012@@ -2730,6 +3212,7 @@ void exit_mmap(struct mm_struct *mm)
95013 while (vma) {
95014 if (vma->vm_flags & VM_ACCOUNT)
95015 nr_accounted += vma_pages(vma);
95016+ vma->vm_mirror = NULL;
95017 vma = remove_vma(vma);
95018 }
95019 vm_unacct_memory(nr_accounted);
95020@@ -2747,6 +3230,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
95021 struct vm_area_struct *prev;
95022 struct rb_node **rb_link, *rb_parent;
95023
95024+#ifdef CONFIG_PAX_SEGMEXEC
95025+ struct vm_area_struct *vma_m = NULL;
95026+#endif
95027+
95028+ if (security_mmap_addr(vma->vm_start))
95029+ return -EPERM;
95030+
95031 /*
95032 * The vm_pgoff of a purely anonymous vma should be irrelevant
95033 * until its first write fault, when page's anon_vma and index
95034@@ -2770,7 +3260,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
95035 security_vm_enough_memory_mm(mm, vma_pages(vma)))
95036 return -ENOMEM;
95037
95038+#ifdef CONFIG_PAX_SEGMEXEC
95039+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
95040+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
95041+ if (!vma_m)
95042+ return -ENOMEM;
95043+ }
95044+#endif
95045+
95046 vma_link(mm, vma, prev, rb_link, rb_parent);
95047+
95048+#ifdef CONFIG_PAX_SEGMEXEC
95049+ if (vma_m)
95050+ BUG_ON(pax_mirror_vma(vma_m, vma));
95051+#endif
95052+
95053 return 0;
95054 }
95055
95056@@ -2789,6 +3293,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
95057 struct rb_node **rb_link, *rb_parent;
95058 bool faulted_in_anon_vma = true;
95059
95060+ BUG_ON(vma->vm_mirror);
95061+
95062 /*
95063 * If anonymous vma has not yet been faulted, update new pgoff
95064 * to match new location, to increase its chance of merging.
95065@@ -2853,6 +3359,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
95066 return NULL;
95067 }
95068
95069+#ifdef CONFIG_PAX_SEGMEXEC
95070+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
95071+{
95072+ struct vm_area_struct *prev_m;
95073+ struct rb_node **rb_link_m, *rb_parent_m;
95074+ struct mempolicy *pol_m;
95075+
95076+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
95077+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
95078+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
95079+ *vma_m = *vma;
95080+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
95081+ if (anon_vma_clone(vma_m, vma))
95082+ return -ENOMEM;
95083+ pol_m = vma_policy(vma_m);
95084+ mpol_get(pol_m);
95085+ set_vma_policy(vma_m, pol_m);
95086+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
95087+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
95088+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
95089+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
95090+ if (vma_m->vm_file)
95091+ get_file(vma_m->vm_file);
95092+ if (vma_m->vm_ops && vma_m->vm_ops->open)
95093+ vma_m->vm_ops->open(vma_m);
95094+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
95095+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
95096+ vma_m->vm_mirror = vma;
95097+ vma->vm_mirror = vma_m;
95098+ return 0;
95099+}
95100+#endif
95101+
95102 /*
95103 * Return true if the calling process may expand its vm space by the passed
95104 * number of pages
95105@@ -2864,6 +3403,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
95106
95107 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
95108
95109+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
95110 if (cur + npages > lim)
95111 return 0;
95112 return 1;
95113@@ -2934,6 +3474,22 @@ int install_special_mapping(struct mm_struct *mm,
95114 vma->vm_start = addr;
95115 vma->vm_end = addr + len;
95116
95117+#ifdef CONFIG_PAX_MPROTECT
95118+ if (mm->pax_flags & MF_PAX_MPROTECT) {
95119+#ifndef CONFIG_PAX_MPROTECT_COMPAT
95120+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
95121+ return -EPERM;
95122+ if (!(vm_flags & VM_EXEC))
95123+ vm_flags &= ~VM_MAYEXEC;
95124+#else
95125+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
95126+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
95127+#endif
95128+ else
95129+ vm_flags &= ~VM_MAYWRITE;
95130+ }
95131+#endif
95132+
95133 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
95134 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
95135
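
The PAX_MPROTECT hunk above enforces W^X on special mappings: a request for both VM_WRITE and VM_EXEC is refused, and the VM_MAY* bits are clamped so the dropped right cannot be re-acquired through a later mprotect(). A minimal userspace sketch of the non-COMPAT branch, assuming illustrative flag values (not the kernel's) and a hypothetical harness:

#include <stdio.h>

#define VM_WRITE     0x2UL
#define VM_EXEC      0x4UL
#define VM_MAYWRITE  0x20UL
#define VM_MAYEXEC   0x40UL

/* Returns the clamped flags, or 0 to signal refusal (the hunk's -EPERM). */
static unsigned long clamp_wx(unsigned long vm_flags)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		return 0;			/* writable+executable: refuse */
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;	/* may never become executable */
	else
		vm_flags &= ~VM_MAYWRITE;	/* may never become writable */
	return vm_flags;
}

int main(void)
{
	printf("%#lx\n", clamp_wx(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC));
	printf("%#lx\n", clamp_wx(VM_EXEC | VM_MAYWRITE | VM_MAYEXEC));
	printf("%#lx\n", clamp_wx(VM_WRITE | VM_EXEC));	/* 0: refused */
	return 0;
}
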
95136diff --git a/mm/mprotect.c b/mm/mprotect.c
95137index 769a67a..414d24f 100644
95138--- a/mm/mprotect.c
95139+++ b/mm/mprotect.c
95140@@ -24,10 +24,18 @@
95141 #include <linux/migrate.h>
95142 #include <linux/perf_event.h>
95143 #include <linux/ksm.h>
95144+#include <linux/sched/sysctl.h>
95145+
95146+#ifdef CONFIG_PAX_MPROTECT
95147+#include <linux/elf.h>
95148+#include <linux/binfmts.h>
95149+#endif
95150+
95151 #include <asm/uaccess.h>
95152 #include <asm/pgtable.h>
95153 #include <asm/cacheflush.h>
95154 #include <asm/tlbflush.h>
95155+#include <asm/mmu_context.h>
95156
95157 #ifndef pgprot_modify
95158 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
95159@@ -214,6 +222,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
95160 return pages;
95161 }
95162
95163+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
95164+/* called while holding the mmap semaphore for writing, except for stack expansion */
95165+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
95166+{
95167+ unsigned long oldlimit, newlimit = 0UL;
95168+
95169+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
95170+ return;
95171+
95172+ spin_lock(&mm->page_table_lock);
95173+ oldlimit = mm->context.user_cs_limit;
95174+ if ((prot & VM_EXEC) && oldlimit < end)
95175+ /* USER_CS limit moved up */
95176+ newlimit = end;
95177+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
95178+ /* USER_CS limit moved down */
95179+ newlimit = start;
95180+
95181+ if (newlimit) {
95182+ mm->context.user_cs_limit = newlimit;
95183+
95184+#ifdef CONFIG_SMP
95185+ wmb();
95186+ cpus_clear(mm->context.cpu_user_cs_mask);
95187+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
95188+#endif
95189+
95190+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
95191+ }
95192+ spin_unlock(&mm->page_table_lock);
95193+ if (newlimit == end) {
95194+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
95195+
95196+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
95197+ if (is_vm_hugetlb_page(vma))
95198+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
95199+ else
95200+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
95201+ }
95202+}
95203+#endif
95204+
95205 int
95206 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
95207 unsigned long start, unsigned long end, unsigned long newflags)
95208@@ -226,11 +276,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
95209 int error;
95210 int dirty_accountable = 0;
95211
95212+#ifdef CONFIG_PAX_SEGMEXEC
95213+ struct vm_area_struct *vma_m = NULL;
95214+ unsigned long start_m, end_m;
95215+
95216+ start_m = start + SEGMEXEC_TASK_SIZE;
95217+ end_m = end + SEGMEXEC_TASK_SIZE;
95218+#endif
95219+
95220 if (newflags == oldflags) {
95221 *pprev = vma;
95222 return 0;
95223 }
95224
95225+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
95226+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
95227+
95228+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
95229+ return -ENOMEM;
95230+
95231+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
95232+ return -ENOMEM;
95233+ }
95234+
95235 /*
95236 * If we make a private mapping writable we increase our commit;
95237 * but (without finer accounting) cannot reduce our commit if we
95238@@ -247,6 +315,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
95239 }
95240 }
95241
95242+#ifdef CONFIG_PAX_SEGMEXEC
95243+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
95244+ if (start != vma->vm_start) {
95245+ error = split_vma(mm, vma, start, 1);
95246+ if (error)
95247+ goto fail;
95248+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
95249+ *pprev = (*pprev)->vm_next;
95250+ }
95251+
95252+ if (end != vma->vm_end) {
95253+ error = split_vma(mm, vma, end, 0);
95254+ if (error)
95255+ goto fail;
95256+ }
95257+
95258+ if (pax_find_mirror_vma(vma)) {
95259+ error = __do_munmap(mm, start_m, end_m - start_m);
95260+ if (error)
95261+ goto fail;
95262+ } else {
95263+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
95264+ if (!vma_m) {
95265+ error = -ENOMEM;
95266+ goto fail;
95267+ }
95268+ vma->vm_flags = newflags;
95269+ error = pax_mirror_vma(vma_m, vma);
95270+ if (error) {
95271+ vma->vm_flags = oldflags;
95272+ goto fail;
95273+ }
95274+ }
95275+ }
95276+#endif
95277+
95278 /*
95279 * First try to merge with previous and/or next vma.
95280 */
95281@@ -277,9 +381,21 @@ success:
95282 * vm_flags and vm_page_prot are protected by the mmap_sem
95283 * held in write mode.
95284 */
95285+
95286+#ifdef CONFIG_PAX_SEGMEXEC
95287+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
95288+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
95289+#endif
95290+
95291 vma->vm_flags = newflags;
95292+
95293+#ifdef CONFIG_PAX_MPROTECT
95294+ if (mm->binfmt && mm->binfmt->handle_mprotect)
95295+ mm->binfmt->handle_mprotect(vma, newflags);
95296+#endif
95297+
95298 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
95299- vm_get_page_prot(newflags));
95300+ vm_get_page_prot(vma->vm_flags));
95301
95302 if (vma_wants_writenotify(vma)) {
95303 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
95304@@ -318,6 +434,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
95305 end = start + len;
95306 if (end <= start)
95307 return -ENOMEM;
95308+
95309+#ifdef CONFIG_PAX_SEGMEXEC
95310+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95311+ if (end > SEGMEXEC_TASK_SIZE)
95312+ return -EINVAL;
95313+ } else
95314+#endif
95315+
95316+ if (end > TASK_SIZE)
95317+ return -EINVAL;
95318+
95319 if (!arch_validate_prot(prot))
95320 return -EINVAL;
95321
95322@@ -325,7 +452,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
95323 /*
95324 * Does the application expect PROT_READ to imply PROT_EXEC:
95325 */
95326- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
95327+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
95328 prot |= PROT_EXEC;
95329
95330 vm_flags = calc_vm_prot_bits(prot);
95331@@ -357,6 +484,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
95332 if (start > vma->vm_start)
95333 prev = vma;
95334
95335+#ifdef CONFIG_PAX_MPROTECT
95336+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
95337+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
95338+#endif
95339+
95340 for (nstart = start ; ; ) {
95341 unsigned long newflags;
95342
95343@@ -367,6 +499,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
95344
95345 /* newflags >> 4 shift VM_MAY% in place of VM_% */
95346 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
95347+ if (prot & (PROT_WRITE | PROT_EXEC))
95348+ gr_log_rwxmprotect(vma);
95349+
95350+ error = -EACCES;
95351+ goto out;
95352+ }
95353+
95354+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
95355 error = -EACCES;
95356 goto out;
95357 }
95358@@ -381,6 +521,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
95359 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
95360 if (error)
95361 goto out;
95362+
95363+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
95364+
95365 nstart = tmp;
95366
95367 if (nstart < prev->vm_end)
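
track_exec_limit(), added above, keeps the segment-based USER_CS limit in sync with the highest executable mapping when PAGEEXEC has to be emulated without hardware NX. A small userspace sketch of just the limit-update rule, with a hypothetical test harness; the kernel version additionally broadcasts the new limit to other CPUs and re-applies page protections up to the old limit:

#include <assert.h>
#include <stdio.h>

#define VM_EXEC 0x4UL

/* Return the new USER_CS limit, or 0 if it should stay unchanged. */
static unsigned long new_user_cs_limit(unsigned long oldlimit,
				       unsigned long start, unsigned long end,
				       unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;	/* limit moves up to cover the new exec range */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;	/* limit moves down past the de-exec'd range */
	return 0;		/* no change */
}

int main(void)
{
	/* [0x1000,0x5000) becomes executable: limit grows to 0x5000 */
	assert(new_user_cs_limit(0x2000, 0x1000, 0x5000, VM_EXEC) == 0x5000);
	/* the range holding the current limit loses exec: limit shrinks */
	assert(new_user_cs_limit(0x4000, 0x3000, 0x5000, 0) == 0x3000);
	/* a non-exec change below the limit leaves it alone */
	assert(new_user_cs_limit(0x8000, 0x1000, 0x2000, 0) == 0);
	puts("ok");
	return 0;
}
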
95368diff --git a/mm/mremap.c b/mm/mremap.c
95369index 05f1180..c3cde48 100644
95370--- a/mm/mremap.c
95371+++ b/mm/mremap.c
95372@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
95373 continue;
95374 pte = ptep_get_and_clear(mm, old_addr, old_pte);
95375 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
95376+
95377+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
95378+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
95379+ pte = pte_exprotect(pte);
95380+#endif
95381+
95382 pte = move_soft_dirty_pte(pte);
95383 set_pte_at(mm, new_addr, new_pte, pte);
95384 }
95385@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
95386 if (is_vm_hugetlb_page(vma))
95387 goto Einval;
95388
95389+#ifdef CONFIG_PAX_SEGMEXEC
95390+ if (pax_find_mirror_vma(vma))
95391+ goto Einval;
95392+#endif
95393+
95394 /* We can't remap across vm area boundaries */
95395 if (old_len > vma->vm_end - addr)
95396 goto Efault;
95397@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
95398 unsigned long ret = -EINVAL;
95399 unsigned long charged = 0;
95400 unsigned long map_flags;
95401+ unsigned long pax_task_size = TASK_SIZE;
95402
95403 if (new_addr & ~PAGE_MASK)
95404 goto out;
95405
95406- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
95407+#ifdef CONFIG_PAX_SEGMEXEC
95408+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
95409+ pax_task_size = SEGMEXEC_TASK_SIZE;
95410+#endif
95411+
95412+ pax_task_size -= PAGE_SIZE;
95413+
95414+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
95415 goto out;
95416
95417 /* Check if the location we're moving into overlaps the
95418 * old location at all, and fail if it does.
95419 */
95420- if ((new_addr <= addr) && (new_addr+new_len) > addr)
95421- goto out;
95422-
95423- if ((addr <= new_addr) && (addr+old_len) > new_addr)
95424+ if (addr + old_len > new_addr && new_addr + new_len > addr)
95425 goto out;
95426
95427 ret = do_munmap(mm, new_addr, new_len);
95428@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
95429 unsigned long ret = -EINVAL;
95430 unsigned long charged = 0;
95431 bool locked = false;
95432+ unsigned long pax_task_size = TASK_SIZE;
95433
95434 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
95435 return ret;
95436@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
95437 if (!new_len)
95438 return ret;
95439
95440+#ifdef CONFIG_PAX_SEGMEXEC
95441+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
95442+ pax_task_size = SEGMEXEC_TASK_SIZE;
95443+#endif
95444+
95445+ pax_task_size -= PAGE_SIZE;
95446+
95447+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
95448+ old_len > pax_task_size || addr > pax_task_size-old_len)
95449+ return ret;
95450+
95451 down_write(&current->mm->mmap_sem);
95452
95453 if (flags & MREMAP_FIXED) {
95454@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
95455 new_addr = addr;
95456 }
95457 ret = addr;
95458+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
95459 goto out;
95460 }
95461 }
95462@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
95463 goto out;
95464 }
95465
95466+ map_flags = vma->vm_flags;
95467 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
95468+ if (!(ret & ~PAGE_MASK)) {
95469+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
95470+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
95471+ }
95472 }
95473 out:
95474 if (ret & ~PAGE_MASK)
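
The mremap_to() hunk above collapses two ordered comparisons into one canonical half-open interval overlap test. A brute-force sketch (not part of the patch) checking that the two forms agree for all positive lengths in a small range:

#include <assert.h>
#include <stdio.h>

static int overlap_old(unsigned long addr, unsigned long old_len,
		       unsigned long new_addr, unsigned long new_len)
{
	if (new_addr <= addr && new_addr + new_len > addr)
		return 1;
	if (addr <= new_addr && addr + old_len > new_addr)
		return 1;
	return 0;
}

static int overlap_new(unsigned long addr, unsigned long old_len,
		       unsigned long new_addr, unsigned long new_len)
{
	/* [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect */
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	unsigned long a, ol, n, nl;

	for (a = 0; a < 8; a++)
		for (ol = 1; ol < 8; ol++)
			for (n = 0; n < 8; n++)
				for (nl = 1; nl < 8; nl++)
					assert(overlap_old(a, ol, n, nl) ==
					       overlap_new(a, ol, n, nl));
	puts("equivalent for all tested ranges");
	return 0;
}
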
95475diff --git a/mm/nommu.c b/mm/nommu.c
95476index 8740213..f87e25b 100644
95477--- a/mm/nommu.c
95478+++ b/mm/nommu.c
95479@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
95480 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
95481 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
95482 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
95483-int heap_stack_gap = 0;
95484
95485 atomic_long_t mmap_pages_allocated;
95486
95487@@ -845,15 +844,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
95488 EXPORT_SYMBOL(find_vma);
95489
95490 /*
95491- * find a VMA
95492- * - we don't extend stack VMAs under NOMMU conditions
95493- */
95494-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
95495-{
95496- return find_vma(mm, addr);
95497-}
95498-
95499-/*
95500 * expand a stack to a given address
95501 * - not supported under NOMMU conditions
95502 */
95503@@ -1564,6 +1554,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
95504
95505 /* most fields are the same, copy all, and then fixup */
95506 *new = *vma;
95507+ INIT_LIST_HEAD(&new->anon_vma_chain);
95508 *region = *vma->vm_region;
95509 new->vm_region = region;
95510
95511@@ -1993,8 +1984,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
95512 }
95513 EXPORT_SYMBOL(generic_file_remap_pages);
95514
95515-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
95516- unsigned long addr, void *buf, int len, int write)
95517+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
95518+ unsigned long addr, void *buf, size_t len, int write)
95519 {
95520 struct vm_area_struct *vma;
95521
95522@@ -2035,8 +2026,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
95523 *
95524 * The caller must hold a reference on @mm.
95525 */
95526-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
95527- void *buf, int len, int write)
95528+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
95529+ void *buf, size_t len, int write)
95530 {
95531 return __access_remote_vm(NULL, mm, addr, buf, len, write);
95532 }
95533@@ -2045,7 +2036,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
95534 * Access another process' address space.
95535 * - source/target buffer must be kernel space
95536 */
95537-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
95538+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
95539 {
95540 struct mm_struct *mm;
95541
95542diff --git a/mm/page-writeback.c b/mm/page-writeback.c
95543index 8f6daa6..1f8587c 100644
95544--- a/mm/page-writeback.c
95545+++ b/mm/page-writeback.c
95546@@ -685,7 +685,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
95547 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
95548 * - the bdi dirty thresh drops quickly due to change of JBOD workload
95549 */
95550-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
95551+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
95552 unsigned long thresh,
95553 unsigned long bg_thresh,
95554 unsigned long dirty,
95555diff --git a/mm/page_alloc.c b/mm/page_alloc.c
95556index 7387a67..67105e4 100644
95557--- a/mm/page_alloc.c
95558+++ b/mm/page_alloc.c
95559@@ -61,6 +61,7 @@
95560 #include <linux/page-debug-flags.h>
95561 #include <linux/hugetlb.h>
95562 #include <linux/sched/rt.h>
95563+#include <linux/random.h>
95564
95565 #include <asm/sections.h>
95566 #include <asm/tlbflush.h>
95567@@ -354,7 +355,7 @@ out:
95568 * This usage means that zero-order pages may not be compound.
95569 */
95570
95571-static void free_compound_page(struct page *page)
95572+void free_compound_page(struct page *page)
95573 {
95574 __free_pages_ok(page, compound_order(page));
95575 }
95576@@ -728,6 +729,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
95577 int i;
95578 int bad = 0;
95579
95580+#ifdef CONFIG_PAX_MEMORY_SANITIZE
95581+ unsigned long index = 1UL << order;
95582+#endif
95583+
95584 trace_mm_page_free(page, order);
95585 kmemcheck_free_shadow(page, order);
95586
95587@@ -744,6 +749,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
95588 debug_check_no_obj_freed(page_address(page),
95589 PAGE_SIZE << order);
95590 }
95591+
95592+#ifdef CONFIG_PAX_MEMORY_SANITIZE
95593+ for (; index; --index)
95594+ sanitize_highpage(page + index - 1);
95595+#endif
95596+
95597 arch_free_page(page, order);
95598 kernel_map_pages(page, 1 << order, 0);
95599
95600@@ -766,6 +777,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
95601 local_irq_restore(flags);
95602 }
95603
95604+#ifdef CONFIG_PAX_LATENT_ENTROPY
95605+bool __meminitdata extra_latent_entropy;
95606+
95607+static int __init setup_pax_extra_latent_entropy(char *str)
95608+{
95609+ extra_latent_entropy = true;
95610+ return 0;
95611+}
95612+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
95613+
95614+volatile u64 latent_entropy __latent_entropy;
95615+EXPORT_SYMBOL(latent_entropy);
95616+#endif
95617+
95618 void __init __free_pages_bootmem(struct page *page, unsigned int order)
95619 {
95620 unsigned int nr_pages = 1 << order;
95621@@ -781,6 +806,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
95622 __ClearPageReserved(p);
95623 set_page_count(p, 0);
95624
95625+#ifdef CONFIG_PAX_LATENT_ENTROPY
95626+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
95627+ u64 hash = 0;
95628+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
95629+ const u64 *data = lowmem_page_address(page);
95630+
95631+ for (index = 0; index < end; index++)
95632+ hash ^= hash + data[index];
95633+ latent_entropy ^= hash;
95634+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
95635+ }
95636+#endif
95637+
95638 page_zone(page)->managed_pages += nr_pages;
95639 set_page_refcounted(page);
95640 __free_pages(page, order);
95641@@ -897,8 +935,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
95642 arch_alloc_page(page, order);
95643 kernel_map_pages(page, 1 << order, 1);
95644
95645+#ifndef CONFIG_PAX_MEMORY_SANITIZE
95646 if (gfp_flags & __GFP_ZERO)
95647 prep_zero_page(page, order, gfp_flags);
95648+#endif
95649
95650 if (order && (gfp_flags & __GFP_COMP))
95651 prep_compound_page(page, order);
95652@@ -2401,7 +2441,7 @@ static void reset_alloc_batches(struct zonelist *zonelist,
95653 continue;
95654 mod_zone_page_state(zone, NR_ALLOC_BATCH,
95655 high_wmark_pages(zone) - low_wmark_pages(zone) -
95656- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
95657+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
95658 }
95659 }
95660
95661@@ -6565,4 +6605,4 @@ void dump_page(struct page *page, char *reason)
95662 {
95663 dump_page_badflags(page, reason, 0);
95664 }
95665-EXPORT_SYMBOL_GPL(dump_page);
95666+EXPORT_SYMBOL(dump_page);
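
The PAX_LATENT_ENTROPY hunk above folds the contents of low boot-time pages into a single 64-bit word with a cheap, non-cryptographic mix and credits the result to the entropy pool via add_device_randomness(). A userspace sketch of the folding loop, with a local buffer standing in for lowmem_page_address(page):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static uint64_t fold_page(const void *page_addr, size_t nr_pages)
{
	const uint64_t *data = page_addr;
	size_t index, end = PAGE_SIZE * nr_pages / sizeof(uint64_t);
	uint64_t hash = 0;

	for (index = 0; index < end; index++)
		hash ^= hash + data[index];	/* same mix as the hunk above */
	return hash;
}

int main(void)
{
	static uint64_t page[PAGE_SIZE / sizeof(uint64_t)];

	memset(page, 0x5a, sizeof(page));	/* stand-in for boot-time memory */
	printf("folded hash: %#llx\n",
	       (unsigned long long)fold_page(page, 1));
	return 0;
}
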
95667diff --git a/mm/page_io.c b/mm/page_io.c
95668index 7c59ef6..1358905 100644
95669--- a/mm/page_io.c
95670+++ b/mm/page_io.c
95671@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
95672 struct file *swap_file = sis->swap_file;
95673 struct address_space *mapping = swap_file->f_mapping;
95674 struct iovec iov = {
95675- .iov_base = kmap(page),
95676+ .iov_base = (void __force_user *)kmap(page),
95677 .iov_len = PAGE_SIZE,
95678 };
95679
95680diff --git a/mm/percpu.c b/mm/percpu.c
95681index a2a54a8..43ecb68 100644
95682--- a/mm/percpu.c
95683+++ b/mm/percpu.c
95684@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
95685 static unsigned int pcpu_high_unit_cpu __read_mostly;
95686
95687 /* the address of the first chunk which starts with the kernel static area */
95688-void *pcpu_base_addr __read_mostly;
95689+void *pcpu_base_addr __read_only;
95690 EXPORT_SYMBOL_GPL(pcpu_base_addr);
95691
95692 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
95693diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
95694index fd26d04..0cea1b0 100644
95695--- a/mm/process_vm_access.c
95696+++ b/mm/process_vm_access.c
95697@@ -13,6 +13,7 @@
95698 #include <linux/uio.h>
95699 #include <linux/sched.h>
95700 #include <linux/highmem.h>
95701+#include <linux/security.h>
95702 #include <linux/ptrace.h>
95703 #include <linux/slab.h>
95704 #include <linux/syscalls.h>
95705@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
95706 size_t iov_l_curr_offset = 0;
95707 ssize_t iov_len;
95708
95709+ return -ENOSYS; // PaX: until properly audited
95710+
95711 /*
95712 * Work out how many pages of struct pages we're going to need
95713 * when eventually calling get_user_pages
95714 */
95715 for (i = 0; i < riovcnt; i++) {
95716 iov_len = rvec[i].iov_len;
95717- if (iov_len > 0) {
95718- nr_pages_iov = ((unsigned long)rvec[i].iov_base
95719- + iov_len)
95720- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
95721- / PAGE_SIZE + 1;
95722- nr_pages = max(nr_pages, nr_pages_iov);
95723- }
95724+ if (iov_len <= 0)
95725+ continue;
95726+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
95727+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
95728+ nr_pages = max(nr_pages, nr_pages_iov);
95729 }
95730
95731 if (nr_pages == 0)
95732@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
95733 goto free_proc_pages;
95734 }
95735
95736+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
95737+ rc = -EPERM;
95738+ goto put_task_struct;
95739+ }
95740+
95741 mm = mm_access(task, PTRACE_MODE_ATTACH);
95742 if (!mm || IS_ERR(mm)) {
95743 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
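
The process_vm_rw_core() hunk above only restructures the sizing loop (an early continue instead of a nested if); the arithmetic is unchanged: for each iovec it takes the page index one past the end, subtracts the page index of the base, and adds one, giving a conservative upper bound on the pages the iovec can touch. A sketch of that arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	/* one byte: one page */
	printf("%lu\n", pages_spanned(0x1000, 1));		/* 1 */
	/* crosses one page boundary: two pages */
	printf("%lu\n", pages_spanned(0x1fff, 2));		/* 2 */
	/* a full page starting mid-page: two pages */
	printf("%lu\n", pages_spanned(0x1800, PAGE_SIZE));	/* 2 */
	return 0;
}
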
95744diff --git a/mm/rmap.c b/mm/rmap.c
95745index d3cbac5..3784601 100644
95746--- a/mm/rmap.c
95747+++ b/mm/rmap.c
95748@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
95749 struct anon_vma *anon_vma = vma->anon_vma;
95750 struct anon_vma_chain *avc;
95751
95752+#ifdef CONFIG_PAX_SEGMEXEC
95753+ struct anon_vma_chain *avc_m = NULL;
95754+#endif
95755+
95756 might_sleep();
95757 if (unlikely(!anon_vma)) {
95758 struct mm_struct *mm = vma->vm_mm;
95759@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
95760 if (!avc)
95761 goto out_enomem;
95762
95763+#ifdef CONFIG_PAX_SEGMEXEC
95764+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
95765+ if (!avc_m)
95766+ goto out_enomem_free_avc;
95767+#endif
95768+
95769 anon_vma = find_mergeable_anon_vma(vma);
95770 allocated = NULL;
95771 if (!anon_vma) {
95772@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
95773 /* page_table_lock to protect against threads */
95774 spin_lock(&mm->page_table_lock);
95775 if (likely(!vma->anon_vma)) {
95776+
95777+#ifdef CONFIG_PAX_SEGMEXEC
95778+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
95779+
95780+ if (vma_m) {
95781+ BUG_ON(vma_m->anon_vma);
95782+ vma_m->anon_vma = anon_vma;
95783+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
95784+ avc_m = NULL;
95785+ }
95786+#endif
95787+
95788 vma->anon_vma = anon_vma;
95789 anon_vma_chain_link(vma, avc, anon_vma);
95790 allocated = NULL;
95791@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
95792
95793 if (unlikely(allocated))
95794 put_anon_vma(allocated);
95795+
95796+#ifdef CONFIG_PAX_SEGMEXEC
95797+ if (unlikely(avc_m))
95798+ anon_vma_chain_free(avc_m);
95799+#endif
95800+
95801 if (unlikely(avc))
95802 anon_vma_chain_free(avc);
95803 }
95804 return 0;
95805
95806 out_enomem_free_avc:
95807+
95808+#ifdef CONFIG_PAX_SEGMEXEC
95809+ if (avc_m)
95810+ anon_vma_chain_free(avc_m);
95811+#endif
95812+
95813 anon_vma_chain_free(avc);
95814 out_enomem:
95815 return -ENOMEM;
95816@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
95817 * Attach the anon_vmas from src to dst.
95818 * Returns 0 on success, -ENOMEM on failure.
95819 */
95820-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
95821+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
95822 {
95823 struct anon_vma_chain *avc, *pavc;
95824 struct anon_vma *root = NULL;
95825@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
95826 * the corresponding VMA in the parent process is attached to.
95827 * Returns 0 on success, non-zero on failure.
95828 */
95829-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
95830+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
95831 {
95832 struct anon_vma_chain *avc;
95833 struct anon_vma *anon_vma;
95834@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
95835 void __init anon_vma_init(void)
95836 {
95837 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
95838- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
95839- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
95840+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
95841+ anon_vma_ctor);
95842+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
95843+ SLAB_PANIC|SLAB_NO_SANITIZE);
95844 }
95845
95846 /*
95847@@ -1554,10 +1590,9 @@ void __put_anon_vma(struct anon_vma *anon_vma)
95848 {
95849 struct anon_vma *root = anon_vma->root;
95850
95851+ anon_vma_free(anon_vma);
95852 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
95853 anon_vma_free(root);
95854-
95855- anon_vma_free(anon_vma);
95856 }
95857
95858 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
95859diff --git a/mm/shmem.c b/mm/shmem.c
95860index 1f18c9d..3e03d33 100644
95861--- a/mm/shmem.c
95862+++ b/mm/shmem.c
95863@@ -33,7 +33,7 @@
95864 #include <linux/swap.h>
95865 #include <linux/aio.h>
95866
95867-static struct vfsmount *shm_mnt;
95868+struct vfsmount *shm_mnt;
95869
95870 #ifdef CONFIG_SHMEM
95871 /*
95872@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
95873 #define BOGO_DIRENT_SIZE 20
95874
95875 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
95876-#define SHORT_SYMLINK_LEN 128
95877+#define SHORT_SYMLINK_LEN 64
95878
95879 /*
95880 * shmem_fallocate and shmem_writepage communicate via inode->i_private
95881@@ -2218,6 +2218,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
95882 static int shmem_xattr_validate(const char *name)
95883 {
95884 struct { const char *prefix; size_t len; } arr[] = {
95885+
95886+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
95887+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
95888+#endif
95889+
95890 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
95891 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
95892 };
95893@@ -2273,6 +2278,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
95894 if (err)
95895 return err;
95896
95897+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
95898+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
95899+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
95900+ return -EOPNOTSUPP;
95901+ if (size > 8)
95902+ return -EINVAL;
95903+ }
95904+#endif
95905+
95906 return simple_xattr_set(&info->xattrs, name, value, size, flags);
95907 }
95908
95909@@ -2585,8 +2599,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
95910 int err = -ENOMEM;
95911
95912 /* Round up to L1_CACHE_BYTES to resist false sharing */
95913- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
95914- L1_CACHE_BYTES), GFP_KERNEL);
95915+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
95916 if (!sbinfo)
95917 return -ENOMEM;
95918
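
Under PAX_XATTR_PAX_FLAGS, the shmem hunks above admit the user.* namespace but gate it hard: only the PaX flags attribute is accepted, and its value is capped at 8 bytes (one character per flag). A userspace sketch of the check; the constant values here are illustrative stand-ins for the kernel's:

#include <stdio.h>
#include <string.h>

#define XATTR_USER_PREFIX	"user."
#define XATTR_USER_PREFIX_LEN	(sizeof(XATTR_USER_PREFIX) - 1)
#define XATTR_NAME_PAX_FLAGS	"user.pax.flags"

static int shmem_pax_xattr_check(const char *name, size_t size)
{
	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
			return -95;	/* -EOPNOTSUPP: other user.* refused */
		if (size > 8)
			return -22;	/* -EINVAL: flags value too long */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", shmem_pax_xattr_check("user.pax.flags", 4));	/* 0 */
	printf("%d\n", shmem_pax_xattr_check("user.other", 4));	/* -95 */
	printf("%d\n", shmem_pax_xattr_check("user.pax.flags", 16));	/* -22 */
	return 0;
}
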
95919diff --git a/mm/slab.c b/mm/slab.c
95920index b264214..83872cd 100644
95921--- a/mm/slab.c
95922+++ b/mm/slab.c
95923@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
95924 if ((x)->max_freeable < i) \
95925 (x)->max_freeable = i; \
95926 } while (0)
95927-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
95928-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
95929-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
95930-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
95931+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
95932+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
95933+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
95934+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
95935+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
95936+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
95937 #else
95938 #define STATS_INC_ACTIVE(x) do { } while (0)
95939 #define STATS_DEC_ACTIVE(x) do { } while (0)
95940@@ -320,6 +322,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
95941 #define STATS_INC_ALLOCMISS(x) do { } while (0)
95942 #define STATS_INC_FREEHIT(x) do { } while (0)
95943 #define STATS_INC_FREEMISS(x) do { } while (0)
95944+#define STATS_INC_SANITIZED(x) do { } while (0)
95945+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
95946 #endif
95947
95948 #if DEBUG
95949@@ -403,7 +407,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
95950 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
95951 */
95952 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
95953- const struct page *page, void *obj)
95954+ const struct page *page, const void *obj)
95955 {
95956 u32 offset = (obj - page->s_mem);
95957 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
95958@@ -1489,12 +1493,12 @@ void __init kmem_cache_init(void)
95959 */
95960
95961 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
95962- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
95963+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
95964
95965 if (INDEX_AC != INDEX_NODE)
95966 kmalloc_caches[INDEX_NODE] =
95967 create_kmalloc_cache("kmalloc-node",
95968- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
95969+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
95970
95971 slab_early_init = 0;
95972
95973@@ -3428,6 +3432,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
95974 struct array_cache *ac = cpu_cache_get(cachep);
95975
95976 check_irq_off();
95977+
95978+#ifdef CONFIG_PAX_MEMORY_SANITIZE
95979+ if (pax_sanitize_slab) {
95980+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
95981+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
95982+
95983+ if (cachep->ctor)
95984+ cachep->ctor(objp);
95985+
95986+ STATS_INC_SANITIZED(cachep);
95987+ } else
95988+ STATS_INC_NOT_SANITIZED(cachep);
95989+ }
95990+#endif
95991+
95992 kmemleak_free_recursive(objp, cachep->flags);
95993 objp = cache_free_debugcheck(cachep, objp, caller);
95994
95995@@ -3656,6 +3675,7 @@ void kfree(const void *objp)
95996
95997 if (unlikely(ZERO_OR_NULL_PTR(objp)))
95998 return;
95999+ VM_BUG_ON(!virt_addr_valid(objp));
96000 local_irq_save(flags);
96001 kfree_debugcheck(objp);
96002 c = virt_to_cache(objp);
96003@@ -4097,14 +4117,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
96004 }
96005 /* cpu stats */
96006 {
96007- unsigned long allochit = atomic_read(&cachep->allochit);
96008- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
96009- unsigned long freehit = atomic_read(&cachep->freehit);
96010- unsigned long freemiss = atomic_read(&cachep->freemiss);
96011+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
96012+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
96013+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
96014+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
96015
96016 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
96017 allochit, allocmiss, freehit, freemiss);
96018 }
96019+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96020+ {
96021+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
96022+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
96023+
96024+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
96025+ }
96026+#endif
96027 #endif
96028 }
96029
96030@@ -4334,13 +4362,69 @@ static const struct file_operations proc_slabstats_operations = {
96031 static int __init slab_proc_init(void)
96032 {
96033 #ifdef CONFIG_DEBUG_SLAB_LEAK
96034- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
96035+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
96036 #endif
96037 return 0;
96038 }
96039 module_init(slab_proc_init);
96040 #endif
96041
96042+bool is_usercopy_object(const void *ptr)
96043+{
96044+ struct page *page;
96045+ struct kmem_cache *cachep;
96046+
96047+ if (ZERO_OR_NULL_PTR(ptr))
96048+ return false;
96049+
96050+ if (!slab_is_available())
96051+ return false;
96052+
96053+ if (!virt_addr_valid(ptr))
96054+ return false;
96055+
96056+ page = virt_to_head_page(ptr);
96057+
96058+ if (!PageSlab(page))
96059+ return false;
96060+
96061+ cachep = page->slab_cache;
96062+ return cachep->flags & SLAB_USERCOPY;
96063+}
96064+
96065+#ifdef CONFIG_PAX_USERCOPY
96066+const char *check_heap_object(const void *ptr, unsigned long n)
96067+{
96068+ struct page *page;
96069+ struct kmem_cache *cachep;
96070+ unsigned int objnr;
96071+ unsigned long offset;
96072+
96073+ if (ZERO_OR_NULL_PTR(ptr))
96074+ return "<null>";
96075+
96076+ if (!virt_addr_valid(ptr))
96077+ return NULL;
96078+
96079+ page = virt_to_head_page(ptr);
96080+
96081+ if (!PageSlab(page))
96082+ return NULL;
96083+
96084+ cachep = page->slab_cache;
96085+ if (!(cachep->flags & SLAB_USERCOPY))
96086+ return cachep->name;
96087+
96088+ objnr = obj_to_index(cachep, page, ptr);
96089+ BUG_ON(objnr >= cachep->num);
96090+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
96091+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
96092+ return NULL;
96093+
96094+ return cachep->name;
96095+}
96096+#endif
96097+
96098 /**
96099 * ksize - get the actual amount of memory allocated for a given object
96100 * @objp: Pointer to the object
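
check_heap_object(), added above for SLAB, refuses copies that are not wholly contained in a single object of a SLAB_USERCOPY cache. The core is an overflow-safe bounds test: rather than checking offset + n <= object_size, which can wrap, it bounds the offset first and then compares n against the remaining room. A userspace sketch of just that test:

#include <stddef.h>
#include <stdio.h>

static const char *check_object_bounds(size_t offset, size_t n,
				       size_t object_size, const char *name)
{
	if (offset <= object_size && n <= object_size - offset)
		return NULL;	/* copy stays inside the object */
	return name;		/* would spill past it: refuse, report cache */
}

static void report(const char *verdict)
{
	puts(verdict ? verdict : "ok");
}

int main(void)
{
	/* 16 bytes at offset 48 of a 64-byte object: fits exactly */
	report(check_object_bounds(48, 16, 64, "kmalloc-64"));
	/* 17 bytes at offset 48: one byte past the end */
	report(check_object_bounds(48, 17, 64, "kmalloc-64"));
	/* enormous n: the subtraction form cannot wrap */
	report(check_object_bounds(0, (size_t)-1, 64, "kmalloc-64"));
	return 0;
}
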
96101diff --git a/mm/slab.h b/mm/slab.h
96102index 8184a7c..ab27737 100644
96103--- a/mm/slab.h
96104+++ b/mm/slab.h
96105@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
96106 /* The slab cache that manages slab cache information */
96107 extern struct kmem_cache *kmem_cache;
96108
96109+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96110+#ifdef CONFIG_X86_64
96111+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
96112+#else
96113+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
96114+#endif
96115+extern bool pax_sanitize_slab;
96116+#endif
96117+
96118 unsigned long calculate_alignment(unsigned long flags,
96119 unsigned long align, unsigned long size);
96120
96121@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
96122
96123 /* Legal flag mask for kmem_cache_create(), for various configurations */
96124 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
96125- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
96126+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
96127+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
96128
96129 #if defined(CONFIG_DEBUG_SLAB)
96130 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
96131@@ -257,6 +267,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
96132 return s;
96133
96134 page = virt_to_head_page(x);
96135+
96136+ BUG_ON(!PageSlab(page));
96137+
96138 cachep = page->slab_cache;
96139 if (slab_equal_or_root(cachep, s))
96140 return cachep;
96141diff --git a/mm/slab_common.c b/mm/slab_common.c
96142index 1ec3c61..2067c11 100644
96143--- a/mm/slab_common.c
96144+++ b/mm/slab_common.c
96145@@ -23,11 +23,22 @@
96146
96147 #include "slab.h"
96148
96149-enum slab_state slab_state;
96150+enum slab_state slab_state __read_only;
96151 LIST_HEAD(slab_caches);
96152 DEFINE_MUTEX(slab_mutex);
96153 struct kmem_cache *kmem_cache;
96154
96155+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96156+bool pax_sanitize_slab __read_only = true;
96157+static int __init pax_sanitize_slab_setup(char *str)
96158+{
96159+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
96160+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
96161+ return 1;
96162+}
96163+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
96164+#endif
96165+
96166 #ifdef CONFIG_DEBUG_VM
96167 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
96168 size_t size)
96169@@ -225,7 +236,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
96170 if (err)
96171 goto out_free_cache;
96172
96173- s->refcount = 1;
96174+ atomic_set(&s->refcount, 1);
96175 list_add(&s->list, &slab_caches);
96176 memcg_register_cache(s);
96177
96178@@ -278,8 +289,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
96179
96180 get_online_cpus();
96181 mutex_lock(&slab_mutex);
96182- s->refcount--;
96183- if (!s->refcount) {
96184+ if (atomic_dec_and_test(&s->refcount)) {
96185 list_del(&s->list);
96186
96187 if (!__kmem_cache_shutdown(s)) {
96188@@ -326,7 +336,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
96189 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
96190 name, size, err);
96191
96192- s->refcount = -1; /* Exempt from merging for now */
96193+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
96194 }
96195
96196 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
96197@@ -339,7 +349,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
96198
96199 create_boot_cache(s, name, size, flags);
96200 list_add(&s->list, &slab_caches);
96201- s->refcount = 1;
96202+ atomic_set(&s->refcount, 1);
96203 return s;
96204 }
96205
96206@@ -351,6 +361,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
96207 EXPORT_SYMBOL(kmalloc_dma_caches);
96208 #endif
96209
96210+#ifdef CONFIG_PAX_USERCOPY_SLABS
96211+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
96212+EXPORT_SYMBOL(kmalloc_usercopy_caches);
96213+#endif
96214+
96215 /*
96216 * Conversion table for small slab sizes / 8 to the index in the
96217 * kmalloc array. This is necessary for slabs < 192 since we have non power
96218@@ -415,6 +430,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
96219 return kmalloc_dma_caches[index];
96220
96221 #endif
96222+
96223+#ifdef CONFIG_PAX_USERCOPY_SLABS
96224+ if (unlikely((flags & GFP_USERCOPY)))
96225+ return kmalloc_usercopy_caches[index];
96226+
96227+#endif
96228+
96229 return kmalloc_caches[index];
96230 }
96231
96232@@ -471,7 +493,7 @@ void __init create_kmalloc_caches(unsigned long flags)
96233 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
96234 if (!kmalloc_caches[i]) {
96235 kmalloc_caches[i] = create_kmalloc_cache(NULL,
96236- 1 << i, flags);
96237+ 1 << i, SLAB_USERCOPY | flags);
96238 }
96239
96240 /*
96241@@ -480,10 +502,10 @@ void __init create_kmalloc_caches(unsigned long flags)
96242 * earlier power of two caches
96243 */
96244 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
96245- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
96246+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
96247
96248 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
96249- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
96250+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
96251 }
96252
96253 /* Kmalloc array is now usable */
96254@@ -516,6 +538,23 @@ void __init create_kmalloc_caches(unsigned long flags)
96255 }
96256 }
96257 #endif
96258+
96259+#ifdef CONFIG_PAX_USERCOPY_SLABS
96260+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
96261+ struct kmem_cache *s = kmalloc_caches[i];
96262+
96263+ if (s) {
96264+ int size = kmalloc_size(i);
96265+ char *n = kasprintf(GFP_NOWAIT,
96266+ "usercopy-kmalloc-%d", size);
96267+
96268+ BUG_ON(!n);
96269+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
96270+ size, SLAB_USERCOPY | flags);
96271+ }
96272+ }
96273+#endif
96274+
96275 }
96276 #endif /* !CONFIG_SLOB */
96277
96278@@ -556,6 +595,9 @@ void print_slabinfo_header(struct seq_file *m)
96279 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
96280 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
96281 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
96282+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96283+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
96284+#endif
96285 #endif
96286 seq_putc(m, '\n');
96287 }
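
pax_sanitize_slab defaults to on and is toggled by a numeric pax_sanitize_slab= boot parameter, as added to slab_common.c above. A userspace sketch of the parsing, with strtol standing in for the kernel's simple_strtol:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool pax_sanitize_slab = true;	/* default: enabled */

static void pax_sanitize_slab_setup(const char *str)
{
	pax_sanitize_slab = !!strtol(str, NULL, 0);
	printf("%sabled PaX slab sanitization\n",
	       pax_sanitize_slab ? "En" : "Dis");
}

int main(void)
{
	pax_sanitize_slab_setup("0");	/* "Disabled PaX slab sanitization" */
	pax_sanitize_slab_setup("1");	/* "Enabled PaX slab sanitization" */
	return 0;
}
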
96288diff --git a/mm/slob.c b/mm/slob.c
96289index 4bf8809..98a6914 100644
96290--- a/mm/slob.c
96291+++ b/mm/slob.c
96292@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
96293 /*
96294 * Return the size of a slob block.
96295 */
96296-static slobidx_t slob_units(slob_t *s)
96297+static slobidx_t slob_units(const slob_t *s)
96298 {
96299 if (s->units > 0)
96300 return s->units;
96301@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
96302 /*
96303 * Return the next free slob block pointer after this one.
96304 */
96305-static slob_t *slob_next(slob_t *s)
96306+static slob_t *slob_next(const slob_t *s)
96307 {
96308 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
96309 slobidx_t next;
96310@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
96311 /*
96312 * Returns true if s is the last free block in its page.
96313 */
96314-static int slob_last(slob_t *s)
96315+static int slob_last(const slob_t *s)
96316 {
96317 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
96318 }
96319
96320-static void *slob_new_pages(gfp_t gfp, int order, int node)
96321+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
96322 {
96323- void *page;
96324+ struct page *page;
96325
96326 #ifdef CONFIG_NUMA
96327 if (node != NUMA_NO_NODE)
96328@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
96329 if (!page)
96330 return NULL;
96331
96332- return page_address(page);
96333+ __SetPageSlab(page);
96334+ return page;
96335 }
96336
96337-static void slob_free_pages(void *b, int order)
96338+static void slob_free_pages(struct page *sp, int order)
96339 {
96340 if (current->reclaim_state)
96341 current->reclaim_state->reclaimed_slab += 1 << order;
96342- free_pages((unsigned long)b, order);
96343+ __ClearPageSlab(sp);
96344+ page_mapcount_reset(sp);
96345+ sp->private = 0;
96346+ __free_pages(sp, order);
96347 }
96348
96349 /*
96350@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
96351
96352 /* Not enough space: must allocate a new page */
96353 if (!b) {
96354- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
96355- if (!b)
96356+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
96357+ if (!sp)
96358 return NULL;
96359- sp = virt_to_page(b);
96360- __SetPageSlab(sp);
96361+ b = page_address(sp);
96362
96363 spin_lock_irqsave(&slob_lock, flags);
96364 sp->units = SLOB_UNITS(PAGE_SIZE);
96365 sp->freelist = b;
96366+ sp->private = 0;
96367 INIT_LIST_HEAD(&sp->list);
96368 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
96369 set_slob_page_free(sp, slob_list);
96370@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
96371 if (slob_page_free(sp))
96372 clear_slob_page_free(sp);
96373 spin_unlock_irqrestore(&slob_lock, flags);
96374- __ClearPageSlab(sp);
96375- page_mapcount_reset(sp);
96376- slob_free_pages(b, 0);
96377+ slob_free_pages(sp, 0);
96378 return;
96379 }
96380
96381+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96382+ if (pax_sanitize_slab)
96383+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
96384+#endif
96385+
96386 if (!slob_page_free(sp)) {
96387 /* This slob page is about to become partially free. Easy! */
96388 sp->units = units;
96389@@ -424,11 +431,10 @@ out:
96390 */
96391
96392 static __always_inline void *
96393-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
96394+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
96395 {
96396- unsigned int *m;
96397- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
96398- void *ret;
96399+ slob_t *m;
96400+ void *ret = NULL;
96401
96402 gfp &= gfp_allowed_mask;
96403
96404@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
96405
96406 if (!m)
96407 return NULL;
96408- *m = size;
96409+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
96410+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
96411+ m[0].units = size;
96412+ m[1].units = align;
96413 ret = (void *)m + align;
96414
96415 trace_kmalloc_node(caller, ret,
96416 size, size + align, gfp, node);
96417 } else {
96418 unsigned int order = get_order(size);
96419+ struct page *page;
96420
96421 if (likely(order))
96422 gfp |= __GFP_COMP;
96423- ret = slob_new_pages(gfp, order, node);
96424+ page = slob_new_pages(gfp, order, node);
96425+ if (page) {
96426+ ret = page_address(page);
96427+ page->private = size;
96428+ }
96429
96430 trace_kmalloc_node(caller, ret,
96431 size, PAGE_SIZE << order, gfp, node);
96432 }
96433
96434- kmemleak_alloc(ret, size, 1, gfp);
96435+ return ret;
96436+}
96437+
96438+static __always_inline void *
96439+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
96440+{
96441+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
96442+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
96443+
96444+ if (!ZERO_OR_NULL_PTR(ret))
96445+ kmemleak_alloc(ret, size, 1, gfp);
96446 return ret;
96447 }
96448
96449@@ -493,34 +517,112 @@ void kfree(const void *block)
96450 return;
96451 kmemleak_free(block);
96452
96453+ VM_BUG_ON(!virt_addr_valid(block));
96454 sp = virt_to_page(block);
96455- if (PageSlab(sp)) {
96456+ VM_BUG_ON(!PageSlab(sp));
96457+ if (!sp->private) {
96458 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
96459- unsigned int *m = (unsigned int *)(block - align);
96460- slob_free(m, *m + align);
96461- } else
96462+ slob_t *m = (slob_t *)(block - align);
96463+ slob_free(m, m[0].units + align);
96464+ } else {
96465+ __ClearPageSlab(sp);
96466+ page_mapcount_reset(sp);
96467+ sp->private = 0;
96468 __free_pages(sp, compound_order(sp));
96469+ }
96470 }
96471 EXPORT_SYMBOL(kfree);
96472
96473+bool is_usercopy_object(const void *ptr)
96474+{
96475+ if (!slab_is_available())
96476+ return false;
96477+
96478+ // PAX: TODO
96479+
96480+ return false;
96481+}
96482+
96483+#ifdef CONFIG_PAX_USERCOPY
96484+const char *check_heap_object(const void *ptr, unsigned long n)
96485+{
96486+ struct page *page;
96487+ const slob_t *free;
96488+ const void *base;
96489+ unsigned long flags;
96490+
96491+ if (ZERO_OR_NULL_PTR(ptr))
96492+ return "<null>";
96493+
96494+ if (!virt_addr_valid(ptr))
96495+ return NULL;
96496+
96497+ page = virt_to_head_page(ptr);
96498+ if (!PageSlab(page))
96499+ return NULL;
96500+
96501+ if (page->private) {
96502+ base = page;
96503+ if (base <= ptr && n <= page->private - (ptr - base))
96504+ return NULL;
96505+ return "<slob>";
96506+ }
96507+
96508+	/* walk the free list and the allocated runs between free blocks to find the chunk holding ptr */
96509+ spin_lock_irqsave(&slob_lock, flags);
96510+ base = (void *)((unsigned long)ptr & PAGE_MASK);
96511+ free = page->freelist;
96512+
96513+ while (!slob_last(free) && (void *)free <= ptr) {
96514+ base = free + slob_units(free);
96515+ free = slob_next(free);
96516+ }
96517+
96518+ while (base < (void *)free) {
96519+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
96520+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
96521+ int offset;
96522+
96523+ if (ptr < base + align)
96524+ break;
96525+
96526+ offset = ptr - base - align;
96527+ if (offset >= m) {
96528+ base += size;
96529+ continue;
96530+ }
96531+
96532+ if (n > m - offset)
96533+ break;
96534+
96535+ spin_unlock_irqrestore(&slob_lock, flags);
96536+ return NULL;
96537+ }
96538+
96539+ spin_unlock_irqrestore(&slob_lock, flags);
96540+ return "<slob>";
96541+}
96542+#endif
96543+
96544 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
96545 size_t ksize(const void *block)
96546 {
96547 struct page *sp;
96548 int align;
96549- unsigned int *m;
96550+ slob_t *m;
96551
96552 BUG_ON(!block);
96553 if (unlikely(block == ZERO_SIZE_PTR))
96554 return 0;
96555
96556 sp = virt_to_page(block);
96557- if (unlikely(!PageSlab(sp)))
96558- return PAGE_SIZE << compound_order(sp);
96559+ VM_BUG_ON(!PageSlab(sp));
96560+ if (sp->private)
96561+ return sp->private;
96562
96563 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
96564- m = (unsigned int *)(block - align);
96565- return SLOB_UNITS(*m) * SLOB_UNIT;
96566+ m = (slob_t *)(block - align);
96567+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
96568 }
96569 EXPORT_SYMBOL(ksize);
96570
96571@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
96572
96573 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
96574 {
96575- void *b;
96576+ void *b = NULL;
96577
96578 flags &= gfp_allowed_mask;
96579
96580 lockdep_trace_alloc(flags);
96581
96582+#ifdef CONFIG_PAX_USERCOPY_SLABS
96583+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
96584+#else
96585 if (c->size < PAGE_SIZE) {
96586 b = slob_alloc(c->size, flags, c->align, node);
96587 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
96588 SLOB_UNITS(c->size) * SLOB_UNIT,
96589 flags, node);
96590 } else {
96591- b = slob_new_pages(flags, get_order(c->size), node);
96592+ struct page *sp;
96593+
96594+ sp = slob_new_pages(flags, get_order(c->size), node);
96595+ if (sp) {
96596+ b = page_address(sp);
96597+ sp->private = c->size;
96598+ }
96599 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
96600 PAGE_SIZE << get_order(c->size),
96601 flags, node);
96602 }
96603+#endif
96604
96605 if (b && c->ctor)
96606 c->ctor(b);
96607@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
96608
96609 static void __kmem_cache_free(void *b, int size)
96610 {
96611- if (size < PAGE_SIZE)
96612+ struct page *sp;
96613+
96614+ sp = virt_to_page(b);
96615+ BUG_ON(!PageSlab(sp));
96616+ if (!sp->private)
96617 slob_free(b, size);
96618 else
96619- slob_free_pages(b, get_order(size));
96620+ slob_free_pages(sp, get_order(size));
96621 }
96622
96623 static void kmem_rcu_free(struct rcu_head *head)
96624@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
96625
96626 void kmem_cache_free(struct kmem_cache *c, void *b)
96627 {
96628+ int size = c->size;
96629+
96630+#ifdef CONFIG_PAX_USERCOPY_SLABS
96631+ if (size + c->align < PAGE_SIZE) {
96632+ size += c->align;
96633+ b -= c->align;
96634+ }
96635+#endif
96636+
96637 kmemleak_free_recursive(b, c->flags);
96638 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
96639 struct slob_rcu *slob_rcu;
96640- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
96641- slob_rcu->size = c->size;
96642+ slob_rcu = b + (size - sizeof(struct slob_rcu));
96643+ slob_rcu->size = size;
96644 call_rcu(&slob_rcu->head, kmem_rcu_free);
96645 } else {
96646- __kmem_cache_free(b, c->size);
96647+ __kmem_cache_free(b, size);
96648 }
96649
96650+#ifdef CONFIG_PAX_USERCOPY_SLABS
96651+ trace_kfree(_RET_IP_, b);
96652+#else
96653 trace_kmem_cache_free(_RET_IP_, b);
96654+#endif
96655+
96656 }
96657 EXPORT_SYMBOL(kmem_cache_free);
96658
96659diff --git a/mm/slub.c b/mm/slub.c
96660index 25f14ad..c904f6f 100644
96661--- a/mm/slub.c
96662+++ b/mm/slub.c
96663@@ -207,7 +207,7 @@ struct track {
96664
96665 enum track_item { TRACK_ALLOC, TRACK_FREE };
96666
96667-#ifdef CONFIG_SYSFS
96668+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96669 static int sysfs_slab_add(struct kmem_cache *);
96670 static int sysfs_slab_alias(struct kmem_cache *, const char *);
96671 static void sysfs_slab_remove(struct kmem_cache *);
96672@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t)
96673 if (!t->addr)
96674 return;
96675
96676- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
96677+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
96678 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
96679 #ifdef CONFIG_STACKTRACE
96680 {
96681@@ -2666,6 +2666,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
96682
96683 slab_free_hook(s, x);
96684
96685+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96686+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
96687+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
96688+ if (s->ctor)
96689+ s->ctor(x);
96690+ }
96691+#endif
96692+
96693 redo:
96694 /*
96695 * Determine the currently cpus per cpu slab.
96696@@ -2733,7 +2741,7 @@ static int slub_min_objects;
96697 * Merge control. If this is set then no merging of slab caches will occur.
96698 * (Could be removed. This was introduced to pacify the merge skeptics.)
96699 */
96700-static int slub_nomerge;
96701+static int slub_nomerge = 1;
96702
96703 /*
96704 * Calculate the order of allocation given an slab object size.
96705@@ -3014,6 +3022,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
96706 s->inuse = size;
96707
96708 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
96709+#ifdef CONFIG_PAX_MEMORY_SANITIZE
96710+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
96711+#endif
96712 s->ctor)) {
96713 /*
96714 * Relocate free pointer after the object if it is not
96715@@ -3359,6 +3370,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
96716 EXPORT_SYMBOL(__kmalloc_node);
96717 #endif
96718
96719+bool is_usercopy_object(const void *ptr)
96720+{
96721+ struct page *page;
96722+ struct kmem_cache *s;
96723+
96724+ if (ZERO_OR_NULL_PTR(ptr))
96725+ return false;
96726+
96727+ if (!slab_is_available())
96728+ return false;
96729+
96730+ if (!virt_addr_valid(ptr))
96731+ return false;
96732+
96733+ page = virt_to_head_page(ptr);
96734+
96735+ if (!PageSlab(page))
96736+ return false;
96737+
96738+ s = page->slab_cache;
96739+ return s->flags & SLAB_USERCOPY;
96740+}
96741+
96742+#ifdef CONFIG_PAX_USERCOPY
96743+const char *check_heap_object(const void *ptr, unsigned long n)
96744+{
96745+ struct page *page;
96746+ struct kmem_cache *s;
96747+ unsigned long offset;
96748+
96749+ if (ZERO_OR_NULL_PTR(ptr))
96750+ return "<null>";
96751+
96752+ if (!virt_addr_valid(ptr))
96753+ return NULL;
96754+
96755+ page = virt_to_head_page(ptr);
96756+
96757+ if (!PageSlab(page))
96758+ return NULL;
96759+
96760+ s = page->slab_cache;
96761+ if (!(s->flags & SLAB_USERCOPY))
96762+ return s->name;
96763+
96764+ offset = (ptr - page_address(page)) % s->size;
96765+ if (offset <= s->object_size && n <= s->object_size - offset)
96766+ return NULL;
96767+
96768+ return s->name;
96769+}
96770+#endif
96771+
96772 size_t ksize(const void *object)
96773 {
96774 struct page *page;
96775@@ -3387,6 +3451,7 @@ void kfree(const void *x)
96776 if (unlikely(ZERO_OR_NULL_PTR(x)))
96777 return;
96778
96779+ VM_BUG_ON(!virt_addr_valid(x));
96780 page = virt_to_head_page(x);
96781 if (unlikely(!PageSlab(page))) {
96782 BUG_ON(!PageCompound(page));
96783@@ -3692,7 +3757,7 @@ static int slab_unmergeable(struct kmem_cache *s)
96784 /*
96785 * We may have set a slab to be unmergeable during bootstrap.
96786 */
96787- if (s->refcount < 0)
96788+ if (atomic_read(&s->refcount) < 0)
96789 return 1;
96790
96791 return 0;
96792@@ -3750,7 +3815,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
96793
96794 s = find_mergeable(memcg, size, align, flags, name, ctor);
96795 if (s) {
96796- s->refcount++;
96797+ atomic_inc(&s->refcount);
96798 /*
96799 * Adjust the object sizes so that we clear
96800 * the complete object on kzalloc.
96801@@ -3759,7 +3824,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
96802 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
96803
96804 if (sysfs_slab_alias(s, name)) {
96805- s->refcount--;
96806+ atomic_dec(&s->refcount);
96807 s = NULL;
96808 }
96809 }
96810@@ -3879,7 +3944,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
96811 }
96812 #endif
96813
96814-#ifdef CONFIG_SYSFS
96815+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96816 static int count_inuse(struct page *page)
96817 {
96818 return page->inuse;
96819@@ -4163,7 +4228,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
96820 len += sprintf(buf + len, "%7ld ", l->count);
96821
96822 if (l->addr)
96823+#ifdef CONFIG_GRKERNSEC_HIDESYM
96824+ len += sprintf(buf + len, "%pS", NULL);
96825+#else
96826 len += sprintf(buf + len, "%pS", (void *)l->addr);
96827+#endif
96828 else
96829 len += sprintf(buf + len, "<not-available>");
96830
96831@@ -4268,12 +4337,12 @@ static void resiliency_test(void)
96832 validate_slab_cache(kmalloc_caches[9]);
96833 }
96834 #else
96835-#ifdef CONFIG_SYSFS
96836+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96837 static void resiliency_test(void) {};
96838 #endif
96839 #endif
96840
96841-#ifdef CONFIG_SYSFS
96842+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96843 enum slab_stat_type {
96844 SL_ALL, /* All slabs */
96845 SL_PARTIAL, /* Only partially allocated slabs */
96846@@ -4513,13 +4582,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
96847 {
96848 if (!s->ctor)
96849 return 0;
96850+#ifdef CONFIG_GRKERNSEC_HIDESYM
96851+ return sprintf(buf, "%pS\n", NULL);
96852+#else
96853 return sprintf(buf, "%pS\n", s->ctor);
96854+#endif
96855 }
96856 SLAB_ATTR_RO(ctor);
96857
96858 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
96859 {
96860- return sprintf(buf, "%d\n", s->refcount - 1);
96861+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
96862 }
96863 SLAB_ATTR_RO(aliases);
96864
96865@@ -4607,6 +4680,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
96866 SLAB_ATTR_RO(cache_dma);
96867 #endif
96868
96869+#ifdef CONFIG_PAX_USERCOPY_SLABS
96870+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
96871+{
96872+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
96873+}
96874+SLAB_ATTR_RO(usercopy);
96875+#endif
96876+
96877 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
96878 {
96879 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
96880@@ -4941,6 +5022,9 @@ static struct attribute *slab_attrs[] = {
96881 #ifdef CONFIG_ZONE_DMA
96882 &cache_dma_attr.attr,
96883 #endif
96884+#ifdef CONFIG_PAX_USERCOPY_SLABS
96885+ &usercopy_attr.attr,
96886+#endif
96887 #ifdef CONFIG_NUMA
96888 &remote_node_defrag_ratio_attr.attr,
96889 #endif
96890@@ -5173,6 +5257,7 @@ static char *create_unique_id(struct kmem_cache *s)
96891 return name;
96892 }
96893
96894+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96895 static int sysfs_slab_add(struct kmem_cache *s)
96896 {
96897 int err;
96898@@ -5230,6 +5315,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
96899 kobject_del(&s->kobj);
96900 kobject_put(&s->kobj);
96901 }
96902+#endif
96903
96904 /*
96905 * Need to buffer aliases during bootup until sysfs becomes
96906@@ -5243,6 +5329,7 @@ struct saved_alias {
96907
96908 static struct saved_alias *alias_list;
96909
96910+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
96911 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
96912 {
96913 struct saved_alias *al;
96914@@ -5265,6 +5352,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
96915 alias_list = al;
96916 return 0;
96917 }
96918+#endif
96919
96920 static int __init slab_sysfs_init(void)
96921 {
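
The mm/slub.c changes above wire SLUB into PaX's USERCOPY defense: check_heap_object() refuses a user copy unless the whole span stays inside the payload of a single whitelisted (SLAB_USERCOPY) object. A minimal userspace sketch of just the bounds arithmetic, with an invented struct standing in for the kernel's kmem_cache fields:

#include <stdio.h>
#include <stddef.h>

struct fake_cache {
	size_t size;        /* slot stride, including metadata/padding */
	size_t object_size; /* usable object payload */
};

/* 0 if [offset, offset+n) stays inside one object payload, else -1. */
static int check_span(const struct fake_cache *s, size_t slab_offset, size_t n)
{
	size_t offset = slab_offset % s->size;  /* offset within this slot */

	if (offset <= s->object_size && n <= s->object_size - offset)
		return 0;
	return -1;
}

int main(void)
{
	struct fake_cache c = { .size = 96, .object_size = 88 };

	printf("%d\n", check_span(&c, 100, 80)); /* offset 4, fits: 0 */
	printf("%d\n", check_span(&c, 100, 90)); /* crosses the slot: -1 */
	return 0;
}
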
96922diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
96923index 4cba9c2..b4f9fcc 100644
96924--- a/mm/sparse-vmemmap.c
96925+++ b/mm/sparse-vmemmap.c
96926@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
96927 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
96928 if (!p)
96929 return NULL;
96930- pud_populate(&init_mm, pud, p);
96931+ pud_populate_kernel(&init_mm, pud, p);
96932 }
96933 return pud;
96934 }
96935@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
96936 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
96937 if (!p)
96938 return NULL;
96939- pgd_populate(&init_mm, pgd, p);
96940+ pgd_populate_kernel(&init_mm, pgd, p);
96941 }
96942 return pgd;
96943 }
96944diff --git a/mm/sparse.c b/mm/sparse.c
96945index 63c3ea5..95c0858 100644
96946--- a/mm/sparse.c
96947+++ b/mm/sparse.c
96948@@ -748,7 +748,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
96949
96950 for (i = 0; i < PAGES_PER_SECTION; i++) {
96951 if (PageHWPoison(&memmap[i])) {
96952- atomic_long_sub(1, &num_poisoned_pages);
96953+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
96954 ClearPageHWPoison(&memmap[i]);
96955 }
96956 }
96957diff --git a/mm/swap.c b/mm/swap.c
96958index 0092097..33361ff 100644
96959--- a/mm/swap.c
96960+++ b/mm/swap.c
96961@@ -31,6 +31,7 @@
96962 #include <linux/memcontrol.h>
96963 #include <linux/gfp.h>
96964 #include <linux/uio.h>
96965+#include <linux/hugetlb.h>
96966
96967 #include "internal.h"
96968
96969@@ -76,6 +77,8 @@ static void __put_compound_page(struct page *page)
96970
96971 __page_cache_release(page);
96972 dtor = get_compound_page_dtor(page);
96973+ if (!PageHuge(page))
96974+ BUG_ON(dtor != free_compound_page);
96975 (*dtor)(page);
96976 }
96977
96978diff --git a/mm/swapfile.c b/mm/swapfile.c
96979index 4a7f7e6..22cddf5 100644
96980--- a/mm/swapfile.c
96981+++ b/mm/swapfile.c
96982@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
96983
96984 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
96985 /* Activity counter to indicate that a swapon or swapoff has occurred */
96986-static atomic_t proc_poll_event = ATOMIC_INIT(0);
96987+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
96988
96989 static inline unsigned char swap_count(unsigned char ent)
96990 {
96991@@ -1959,7 +1959,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
96992 spin_unlock(&swap_lock);
96993
96994 err = 0;
96995- atomic_inc(&proc_poll_event);
96996+ atomic_inc_unchecked(&proc_poll_event);
96997 wake_up_interruptible(&proc_poll_wait);
96998
96999 out_dput:
97000@@ -1976,8 +1976,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
97001
97002 poll_wait(file, &proc_poll_wait, wait);
97003
97004- if (seq->poll_event != atomic_read(&proc_poll_event)) {
97005- seq->poll_event = atomic_read(&proc_poll_event);
97006+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
97007+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
97008 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
97009 }
97010
97011@@ -2075,7 +2075,7 @@ static int swaps_open(struct inode *inode, struct file *file)
97012 return ret;
97013
97014 seq = file->private_data;
97015- seq->poll_event = atomic_read(&proc_poll_event);
97016+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
97017 return 0;
97018 }
97019
97020@@ -2534,7 +2534,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
97021 (frontswap_map) ? "FS" : "");
97022
97023 mutex_unlock(&swapon_mutex);
97024- atomic_inc(&proc_poll_event);
97025+ atomic_inc_unchecked(&proc_poll_event);
97026 wake_up_interruptible(&proc_poll_wait);
97027
97028 if (S_ISREG(inode->i_mode))
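
As in several hunks that follow, the swapfile change converts proc_poll_event to atomic_unchecked_t: it is a pure event counter, so wraparound is harmless and it can be exempted from PaX's REFCOUNT overflow trap. A userspace sketch of that split, assuming the REFCOUNT model of trap-on-wrap for ordinary atomics (the names and the undo-and-refuse policy here are illustrative; the real mechanism lives in arch code):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int checked_inc(atomic_int *v)
{
	int old = atomic_fetch_add(v, 1);
	if (old == INT_MAX) {            /* would wrap: undo and report */
		atomic_fetch_sub(v, 1);
		return -1;               /* the kernel kills the task instead */
	}
	return 0;
}

static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);          /* wraparound tolerated: statistics */
}

int main(void)
{
	atomic_int refs = INT_MAX, events = INT_MAX;

	unchecked_inc(&events);          /* fine for an event counter */
	printf("events = %d\n", atomic_load(&events));
	printf("checked_inc = %d\n", checked_inc(&refs)); /* -1, blocked */
	return 0;
}
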
97029diff --git a/mm/util.c b/mm/util.c
97030index a24aa22..a0d41ae 100644
97031--- a/mm/util.c
97032+++ b/mm/util.c
97033@@ -297,6 +297,12 @@ done:
97034 void arch_pick_mmap_layout(struct mm_struct *mm)
97035 {
97036 mm->mmap_base = TASK_UNMAPPED_BASE;
97037+
97038+#ifdef CONFIG_PAX_RANDMMAP
97039+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97040+ mm->mmap_base += mm->delta_mmap;
97041+#endif
97042+
97043 mm->get_unmapped_area = arch_get_unmapped_area;
97044 }
97045 #endif
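
The mm/util.c hunk is the generic half of PAX_RANDMMAP: when the flag is set, the per-process mmap base is shifted by delta_mmap, a random page count chosen at exec time. The arithmetic in isolation (constants invented for illustration; real ranges are per-architecture):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT         12
#define TASK_UNMAPPED_BASE 0x40000000UL

int main(void)
{
	/* stand-in for delta_mmap: 16 bits of page-granular shift */
	unsigned long delta_pages = (unsigned long)rand() & 0xffffUL;
	unsigned long mmap_base = TASK_UNMAPPED_BASE;

	mmap_base += delta_pages << PAGE_SHIFT;
	printf("mmap_base = %#lx\n", mmap_base);
	return 0;
}
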
97046diff --git a/mm/vmalloc.c b/mm/vmalloc.c
97047index 0fdf968..2183ba3 100644
97048--- a/mm/vmalloc.c
97049+++ b/mm/vmalloc.c
97050@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
97051
97052 pte = pte_offset_kernel(pmd, addr);
97053 do {
97054- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
97055- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
97056+
97057+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97058+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
97059+ BUG_ON(!pte_exec(*pte));
97060+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
97061+ continue;
97062+ }
97063+#endif
97064+
97065+ {
97066+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
97067+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
97068+ }
97069 } while (pte++, addr += PAGE_SIZE, addr != end);
97070 }
97071
97072@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
97073 pte = pte_alloc_kernel(pmd, addr);
97074 if (!pte)
97075 return -ENOMEM;
97076+
97077+ pax_open_kernel();
97078 do {
97079 struct page *page = pages[*nr];
97080
97081- if (WARN_ON(!pte_none(*pte)))
97082+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97083+ if (pgprot_val(prot) & _PAGE_NX)
97084+#endif
97085+
97086+ if (!pte_none(*pte)) {
97087+ pax_close_kernel();
97088+ WARN_ON(1);
97089 return -EBUSY;
97090- if (WARN_ON(!page))
97091+ }
97092+ if (!page) {
97093+ pax_close_kernel();
97094+ WARN_ON(1);
97095 return -ENOMEM;
97096+ }
97097 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
97098 (*nr)++;
97099 } while (pte++, addr += PAGE_SIZE, addr != end);
97100+ pax_close_kernel();
97101 return 0;
97102 }
97103
97104@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
97105 pmd_t *pmd;
97106 unsigned long next;
97107
97108- pmd = pmd_alloc(&init_mm, pud, addr);
97109+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
97110 if (!pmd)
97111 return -ENOMEM;
97112 do {
97113@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
97114 pud_t *pud;
97115 unsigned long next;
97116
97117- pud = pud_alloc(&init_mm, pgd, addr);
97118+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
97119 if (!pud)
97120 return -ENOMEM;
97121 do {
97122@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
97123 if (addr >= MODULES_VADDR && addr < MODULES_END)
97124 return 1;
97125 #endif
97126+
97127+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97128+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
97129+ return 1;
97130+#endif
97131+
97132 return is_vmalloc_addr(x);
97133 }
97134
97135@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
97136
97137 if (!pgd_none(*pgd)) {
97138 pud_t *pud = pud_offset(pgd, addr);
97139+#ifdef CONFIG_X86
97140+ if (!pud_large(*pud))
97141+#endif
97142 if (!pud_none(*pud)) {
97143 pmd_t *pmd = pmd_offset(pud, addr);
97144+#ifdef CONFIG_X86
97145+ if (!pmd_large(*pmd))
97146+#endif
97147 if (!pmd_none(*pmd)) {
97148 pte_t *ptep, pte;
97149
97150@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
97151 struct vm_struct *area;
97152
97153 BUG_ON(in_interrupt());
97154+
97155+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
97156+ if (flags & VM_KERNEXEC) {
97157+ if (start != VMALLOC_START || end != VMALLOC_END)
97158+ return NULL;
97159+ start = (unsigned long)MODULES_EXEC_VADDR;
97160+ end = (unsigned long)MODULES_EXEC_END;
97161+ }
97162+#endif
97163+
97164 if (flags & VM_IOREMAP)
97165 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
97166
97167@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
97168 if (count > totalram_pages)
97169 return NULL;
97170
97171+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
97172+ if (!(pgprot_val(prot) & _PAGE_NX))
97173+ flags |= VM_KERNEXEC;
97174+#endif
97175+
97176 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
97177 __builtin_return_address(0));
97178 if (!area)
97179@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
97180 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
97181 goto fail;
97182
97183+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
97184+ if (!(pgprot_val(prot) & _PAGE_NX))
97185+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
97186+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
97187+ else
97188+#endif
97189+
97190 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
97191 start, end, node, gfp_mask, caller);
97192 if (!area)
97193@@ -1701,6 +1759,12 @@ static inline void *__vmalloc_node_flags(unsigned long size,
97194 node, __builtin_return_address(0));
97195 }
97196
97197+void *vmalloc_stack(int node)
97198+{
97199+ return __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP, PAGE_KERNEL,
97200+ node, __builtin_return_address(0));
97201+}
97202+
97203 /**
97204 * vmalloc - allocate virtually contiguous memory
97205 * @size: allocation size
97206@@ -1810,10 +1874,9 @@ EXPORT_SYMBOL(vzalloc_node);
97207 * For tight control over page level allocator and protection flags
97208 * use __vmalloc() instead.
97209 */
97210-
97211 void *vmalloc_exec(unsigned long size)
97212 {
97213- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
97214+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
97215 NUMA_NO_NODE, __builtin_return_address(0));
97216 }
97217
97218@@ -2120,6 +2183,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
97219 {
97220 struct vm_struct *area;
97221
97222+ BUG_ON(vma->vm_mirror);
97223+
97224 size = PAGE_ALIGN(size);
97225
97226 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
97227@@ -2602,7 +2667,11 @@ static int s_show(struct seq_file *m, void *p)
97228 v->addr, v->addr + v->size, v->size);
97229
97230 if (v->caller)
97231+#ifdef CONFIG_GRKERNSEC_HIDESYM
97232+ seq_printf(m, " %pK", v->caller);
97233+#else
97234 seq_printf(m, " %pS", v->caller);
97235+#endif
97236
97237 if (v->nr_pages)
97238 seq_printf(m, " pages=%d", v->nr_pages);
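
The vmalloc.c changes serve PAX_KERNEXEC: a request for executable kernel mappings (vmap/vmalloc with an executable prot) is redirected into the dedicated MODULES_EXEC window so the bulk of the vmalloc range can stay non-executable. The routing decision in miniature (addresses and the boolean prot are made-up stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define VMALLOC_START      0xc0000000UL
#define VMALLOC_END        0xf0000000UL
#define MODULES_EXEC_START 0xf0000000UL
#define MODULES_EXEC_END   0xf8000000UL

struct range { unsigned long start, end; };

static struct range pick_range(bool executable)
{
	if (executable)  /* VM_KERNEXEC in the patch */
		return (struct range){ MODULES_EXEC_START, MODULES_EXEC_END };
	return (struct range){ VMALLOC_START, VMALLOC_END };
}

int main(void)
{
	struct range r = pick_range(true);
	printf("exec alloc from [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}
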
97239diff --git a/mm/vmstat.c b/mm/vmstat.c
97240index def5dd2..4ce55cec 100644
97241--- a/mm/vmstat.c
97242+++ b/mm/vmstat.c
97243@@ -20,6 +20,7 @@
97244 #include <linux/writeback.h>
97245 #include <linux/compaction.h>
97246 #include <linux/mm_inline.h>
97247+#include <linux/grsecurity.h>
97248
97249 #include "internal.h"
97250
97251@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
97252 *
97253 * vm_stat contains the global counters
97254 */
97255-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
97256+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
97257 EXPORT_SYMBOL(vm_stat);
97258
97259 #ifdef CONFIG_SMP
97260@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
97261
97262 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
97263 if (diff[i])
97264- atomic_long_add(diff[i], &vm_stat[i]);
97265+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
97266 }
97267
97268 /*
97269@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
97270 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
97271 if (v) {
97272
97273- atomic_long_add(v, &zone->vm_stat[i]);
97274+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
97275 global_diff[i] += v;
97276 #ifdef CONFIG_NUMA
97277 /* 3 seconds idle till flush */
97278@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
97279
97280 v = p->vm_stat_diff[i];
97281 p->vm_stat_diff[i] = 0;
97282- atomic_long_add(v, &zone->vm_stat[i]);
97283+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
97284 global_diff[i] += v;
97285 }
97286 }
97287@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
97288 if (pset->vm_stat_diff[i]) {
97289 int v = pset->vm_stat_diff[i];
97290 pset->vm_stat_diff[i] = 0;
97291- atomic_long_add(v, &zone->vm_stat[i]);
97292- atomic_long_add(v, &vm_stat[i]);
97293+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
97294+ atomic_long_add_unchecked(v, &vm_stat[i]);
97295 }
97296 }
97297 #endif
97298@@ -1150,10 +1151,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
97299 stat_items_size += sizeof(struct vm_event_state);
97300 #endif
97301
97302- v = kmalloc(stat_items_size, GFP_KERNEL);
97303+ v = kzalloc(stat_items_size, GFP_KERNEL);
97304 m->private = v;
97305 if (!v)
97306 return ERR_PTR(-ENOMEM);
97307+
97308+#ifdef CONFIG_GRKERNSEC_PROC_ADD
97309+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
97310+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
97311+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
97312+ && !in_group_p(grsec_proc_gid)
97313+#endif
97314+ )
97315+ return (unsigned long *)m->private + *pos;
97316+#endif
97317+#endif
97318+
97319 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
97320 v[i] = global_page_state(i);
97321 v += NR_VM_ZONE_STAT_ITEMS;
97322@@ -1302,10 +1315,16 @@ static int __init setup_vmstat(void)
97323 put_online_cpus();
97324 #endif
97325 #ifdef CONFIG_PROC_FS
97326- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
97327- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
97328- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
97329- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
97330+ {
97331+ mode_t gr_mode = S_IRUGO;
97332+#ifdef CONFIG_GRKERNSEC_PROC_ADD
97333+ gr_mode = S_IRUSR;
97334+#endif
97335+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
97336+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
97337+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
97338+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
97339+ }
97340 #endif
97341 return 0;
97342 }
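
Under GRKERNSEC_PROC_ADD the vmstat hunk drops buddyinfo, pagetypeinfo and zoneinfo from world-readable to root-only, while /proc/vmstat is visibly left at S_IRUGO (presumably because common tooling reads it). The mode selection as a plain function, with a runtime flag standing in for the compile-time ifdef:

#include <stdio.h>
#include <sys/stat.h>

static mode_t proc_mode(int restricted)
{
	return restricted ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);
}

int main(void)
{
	printf("buddyinfo mode: %04o\n", (unsigned)proc_mode(1)); /* 0400 */
	printf("vmstat mode:    %04o\n", (unsigned)proc_mode(0)); /* 0444 */
	return 0;
}
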
97343diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
97344index 44ebd5c..1f732bae 100644
97345--- a/net/8021q/vlan.c
97346+++ b/net/8021q/vlan.c
97347@@ -475,7 +475,7 @@ out:
97348 return NOTIFY_DONE;
97349 }
97350
97351-static struct notifier_block vlan_notifier_block __read_mostly = {
97352+static struct notifier_block vlan_notifier_block = {
97353 .notifier_call = vlan_device_event,
97354 };
97355
97356@@ -550,8 +550,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
97357 err = -EPERM;
97358 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
97359 break;
97360- if ((args.u.name_type >= 0) &&
97361- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
97362+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
97363 struct vlan_net *vn;
97364
97365 vn = net_generic(net, vlan_net_id);
97366diff --git a/net/9p/client.c b/net/9p/client.c
97367index 9186550..e604a2f 100644
97368--- a/net/9p/client.c
97369+++ b/net/9p/client.c
97370@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
97371 len - inline_len);
97372 } else {
97373 err = copy_from_user(ename + inline_len,
97374- uidata, len - inline_len);
97375+ (char __force_user *)uidata, len - inline_len);
97376 if (err) {
97377 err = -EFAULT;
97378 goto out_err;
97379@@ -1560,7 +1560,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
97380 kernel_buf = 1;
97381 indata = data;
97382 } else
97383- indata = (__force char *)udata;
97384+ indata = (__force_kernel char *)udata;
97385 /*
97386 * response header len is 11
97387 * PDU Header(7) + IO Size (4)
97388@@ -1635,7 +1635,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
97389 kernel_buf = 1;
97390 odata = data;
97391 } else
97392- odata = (char *)udata;
97393+ odata = (char __force_kernel *)udata;
97394 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
97395 P9_ZC_HDR_SZ, kernel_buf, "dqd",
97396 fid->fid, offset, rsize);
97397diff --git a/net/9p/mod.c b/net/9p/mod.c
97398index 6ab36ae..6f1841b 100644
97399--- a/net/9p/mod.c
97400+++ b/net/9p/mod.c
97401@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
97402 void v9fs_register_trans(struct p9_trans_module *m)
97403 {
97404 spin_lock(&v9fs_trans_lock);
97405- list_add_tail(&m->list, &v9fs_trans_list);
97406+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
97407 spin_unlock(&v9fs_trans_lock);
97408 }
97409 EXPORT_SYMBOL(v9fs_register_trans);
97410@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
97411 void v9fs_unregister_trans(struct p9_trans_module *m)
97412 {
97413 spin_lock(&v9fs_trans_lock);
97414- list_del_init(&m->list);
97415+ pax_list_del_init((struct list_head *)&m->list);
97416 spin_unlock(&v9fs_trans_lock);
97417 }
97418 EXPORT_SYMBOL(v9fs_unregister_trans);
97419diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
97420index b7bd7f2..2498bf7 100644
97421--- a/net/9p/trans_fd.c
97422+++ b/net/9p/trans_fd.c
97423@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
97424 oldfs = get_fs();
97425 set_fs(get_ds());
97426 /* The cast to a user pointer is valid due to the set_fs() */
97427- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
97428+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
97429 set_fs(oldfs);
97430
97431 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
97432diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
97433index af46bc4..f9adfcd 100644
97434--- a/net/appletalk/atalk_proc.c
97435+++ b/net/appletalk/atalk_proc.c
97436@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
97437 struct proc_dir_entry *p;
97438 int rc = -ENOMEM;
97439
97440- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
97441+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
97442 if (!atalk_proc_dir)
97443 goto out;
97444
97445diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
97446index 876fbe8..8bbea9f 100644
97447--- a/net/atm/atm_misc.c
97448+++ b/net/atm/atm_misc.c
97449@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
97450 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
97451 return 1;
97452 atm_return(vcc, truesize);
97453- atomic_inc(&vcc->stats->rx_drop);
97454+ atomic_inc_unchecked(&vcc->stats->rx_drop);
97455 return 0;
97456 }
97457 EXPORT_SYMBOL(atm_charge);
97458@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
97459 }
97460 }
97461 atm_return(vcc, guess);
97462- atomic_inc(&vcc->stats->rx_drop);
97463+ atomic_inc_unchecked(&vcc->stats->rx_drop);
97464 return NULL;
97465 }
97466 EXPORT_SYMBOL(atm_alloc_charge);
97467@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
97468
97469 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97470 {
97471-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
97472+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
97473 __SONET_ITEMS
97474 #undef __HANDLE_ITEM
97475 }
97476@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
97477
97478 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97479 {
97480-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
97481+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
97482 __SONET_ITEMS
97483 #undef __HANDLE_ITEM
97484 }
97485diff --git a/net/atm/lec.c b/net/atm/lec.c
97486index 5a2f602..9396143 100644
97487--- a/net/atm/lec.c
97488+++ b/net/atm/lec.c
97489@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
97490 }
97491
97492 static struct lane2_ops lane2_ops = {
97493- lane2_resolve, /* resolve, spec 3.1.3 */
97494- lane2_associate_req, /* associate_req, spec 3.1.4 */
97495- NULL /* associate indicator, spec 3.1.5 */
97496+ .resolve = lane2_resolve,
97497+ .associate_req = lane2_associate_req,
97498+ .associate_indicator = NULL
97499 };
97500
97501 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
97502diff --git a/net/atm/lec.h b/net/atm/lec.h
97503index 4149db1..f2ab682 100644
97504--- a/net/atm/lec.h
97505+++ b/net/atm/lec.h
97506@@ -48,7 +48,7 @@ struct lane2_ops {
97507 const u8 *tlvs, u32 sizeoftlvs);
97508 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
97509 const u8 *tlvs, u32 sizeoftlvs);
97510-};
97511+} __no_const;
97512
97513 /*
97514 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
97515diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
97516index d1b2d9a..d549f7f 100644
97517--- a/net/atm/mpoa_caches.c
97518+++ b/net/atm/mpoa_caches.c
97519@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
97520
97521
97522 static struct in_cache_ops ingress_ops = {
97523- in_cache_add_entry, /* add_entry */
97524- in_cache_get, /* get */
97525- in_cache_get_with_mask, /* get_with_mask */
97526- in_cache_get_by_vcc, /* get_by_vcc */
97527- in_cache_put, /* put */
97528- in_cache_remove_entry, /* remove_entry */
97529- cache_hit, /* cache_hit */
97530- clear_count_and_expired, /* clear_count */
97531- check_resolving_entries, /* check_resolving */
97532- refresh_entries, /* refresh */
97533- in_destroy_cache /* destroy_cache */
97534+ .add_entry = in_cache_add_entry,
97535+ .get = in_cache_get,
97536+ .get_with_mask = in_cache_get_with_mask,
97537+ .get_by_vcc = in_cache_get_by_vcc,
97538+ .put = in_cache_put,
97539+ .remove_entry = in_cache_remove_entry,
97540+ .cache_hit = cache_hit,
97541+ .clear_count = clear_count_and_expired,
97542+ .check_resolving = check_resolving_entries,
97543+ .refresh = refresh_entries,
97544+ .destroy_cache = in_destroy_cache
97545 };
97546
97547 static struct eg_cache_ops egress_ops = {
97548- eg_cache_add_entry, /* add_entry */
97549- eg_cache_get_by_cache_id, /* get_by_cache_id */
97550- eg_cache_get_by_tag, /* get_by_tag */
97551- eg_cache_get_by_vcc, /* get_by_vcc */
97552- eg_cache_get_by_src_ip, /* get_by_src_ip */
97553- eg_cache_put, /* put */
97554- eg_cache_remove_entry, /* remove_entry */
97555- update_eg_cache_entry, /* update */
97556- clear_expired, /* clear_expired */
97557- eg_destroy_cache /* destroy_cache */
97558+ .add_entry = eg_cache_add_entry,
97559+ .get_by_cache_id = eg_cache_get_by_cache_id,
97560+ .get_by_tag = eg_cache_get_by_tag,
97561+ .get_by_vcc = eg_cache_get_by_vcc,
97562+ .get_by_src_ip = eg_cache_get_by_src_ip,
97563+ .put = eg_cache_put,
97564+ .remove_entry = eg_cache_remove_entry,
97565+ .update = update_eg_cache_entry,
97566+ .clear_expired = clear_expired,
97567+ .destroy_cache = eg_destroy_cache
97568 };
97569
97570
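
The lec.c and mpoa_caches.c hunks convert positional ops-table initializers to designated ones. Designated form stays correct even if members are reordered or new ones are inserted, which matters once structure layouts can be shuffled by hardening plugins. The difference in a small self-contained example:

#include <stdio.h>

struct ops {
	int (*add)(int, int);
	int (*sub)(int, int);
};

static int add(int a, int b) { return a + b; }
static int sub(int a, int b) { return a - b; }

static struct ops table = {
	.add = add,   /* survives member reordering; positional init would not */
	.sub = sub,
};

int main(void)
{
	printf("%d %d\n", table.add(2, 3), table.sub(5, 1));
	return 0;
}
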
97571diff --git a/net/atm/proc.c b/net/atm/proc.c
97572index bbb6461..cf04016 100644
97573--- a/net/atm/proc.c
97574+++ b/net/atm/proc.c
97575@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
97576 const struct k_atm_aal_stats *stats)
97577 {
97578 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
97579- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
97580- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
97581- atomic_read(&stats->rx_drop));
97582+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
97583+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
97584+ atomic_read_unchecked(&stats->rx_drop));
97585 }
97586
97587 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
97588diff --git a/net/atm/resources.c b/net/atm/resources.c
97589index 0447d5d..3cf4728 100644
97590--- a/net/atm/resources.c
97591+++ b/net/atm/resources.c
97592@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
97593 static void copy_aal_stats(struct k_atm_aal_stats *from,
97594 struct atm_aal_stats *to)
97595 {
97596-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
97597+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
97598 __AAL_STAT_ITEMS
97599 #undef __HANDLE_ITEM
97600 }
97601@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
97602 static void subtract_aal_stats(struct k_atm_aal_stats *from,
97603 struct atm_aal_stats *to)
97604 {
97605-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
97606+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
97607 __AAL_STAT_ITEMS
97608 #undef __HANDLE_ITEM
97609 }
97610diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
97611index 919a5ce..cc6b444 100644
97612--- a/net/ax25/sysctl_net_ax25.c
97613+++ b/net/ax25/sysctl_net_ax25.c
97614@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
97615 {
97616 char path[sizeof("net/ax25/") + IFNAMSIZ];
97617 int k;
97618- struct ctl_table *table;
97619+ ctl_table_no_const *table;
97620
97621 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
97622 if (!table)
97623diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
97624index d074d06..ad3cfcf 100644
97625--- a/net/batman-adv/bat_iv_ogm.c
97626+++ b/net/batman-adv/bat_iv_ogm.c
97627@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
97628
97629 /* randomize initial seqno to avoid collision */
97630 get_random_bytes(&random_seqno, sizeof(random_seqno));
97631- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
97632+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
97633
97634 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
97635 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
97636@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
97637 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
97638
97639 /* change sequence number to network order */
97640- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
97641+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
97642 batadv_ogm_packet->seqno = htonl(seqno);
97643- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
97644+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
97645
97646 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
97647
97648@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
97649 return;
97650
97651 /* could be changed by schedule_own_packet() */
97652- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
97653+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
97654
97655 if (ogm_packet->flags & BATADV_DIRECTLINK)
97656 has_directlink_flag = true;
97657diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
97658index cc1cfd6..7a68e022 100644
97659--- a/net/batman-adv/fragmentation.c
97660+++ b/net/batman-adv/fragmentation.c
97661@@ -446,7 +446,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
97662 frag_header.packet_type = BATADV_UNICAST_FRAG;
97663 frag_header.version = BATADV_COMPAT_VERSION;
97664 frag_header.ttl = BATADV_TTL;
97665- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
97666+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
97667 frag_header.reserved = 0;
97668 frag_header.no = 0;
97669 frag_header.total_size = htons(skb->len);
97670diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
97671index f82c267..0e56d32 100644
97672--- a/net/batman-adv/soft-interface.c
97673+++ b/net/batman-adv/soft-interface.c
97674@@ -283,7 +283,7 @@ send:
97675 primary_if->net_dev->dev_addr, ETH_ALEN);
97676
97677 /* set broadcast sequence number */
97678- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
97679+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
97680 bcast_packet->seqno = htonl(seqno);
97681
97682 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
97683@@ -707,7 +707,7 @@ static int batadv_softif_init_late(struct net_device *dev)
97684 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
97685
97686 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
97687- atomic_set(&bat_priv->bcast_seqno, 1);
97688+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
97689 atomic_set(&bat_priv->tt.vn, 0);
97690 atomic_set(&bat_priv->tt.local_changes, 0);
97691 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
97692@@ -721,7 +721,7 @@ static int batadv_softif_init_late(struct net_device *dev)
97693
97694 /* randomize initial seqno to avoid collision */
97695 get_random_bytes(&random_seqno, sizeof(random_seqno));
97696- atomic_set(&bat_priv->frag_seqno, random_seqno);
97697+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
97698
97699 bat_priv->primary_if = NULL;
97700 bat_priv->num_ifaces = 0;
97701diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
97702index 78370ab..1cb3614 100644
97703--- a/net/batman-adv/types.h
97704+++ b/net/batman-adv/types.h
97705@@ -66,7 +66,7 @@ enum batadv_dhcp_recipient {
97706 struct batadv_hard_iface_bat_iv {
97707 unsigned char *ogm_buff;
97708 int ogm_buff_len;
97709- atomic_t ogm_seqno;
97710+ atomic_unchecked_t ogm_seqno;
97711 };
97712
97713 /**
97714@@ -714,7 +714,7 @@ struct batadv_priv {
97715 atomic_t bonding;
97716 atomic_t fragmentation;
97717 atomic_t packet_size_max;
97718- atomic_t frag_seqno;
97719+ atomic_unchecked_t frag_seqno;
97720 #ifdef CONFIG_BATMAN_ADV_BLA
97721 atomic_t bridge_loop_avoidance;
97722 #endif
97723@@ -730,7 +730,7 @@ struct batadv_priv {
97724 #endif
97725 uint32_t isolation_mark;
97726 uint32_t isolation_mark_mask;
97727- atomic_t bcast_seqno;
97728+ atomic_unchecked_t bcast_seqno;
97729 atomic_t bcast_queue_left;
97730 atomic_t batman_queue_left;
97731 char num_ifaces;
97732diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
97733index 7552f9e..074ce29 100644
97734--- a/net/bluetooth/hci_sock.c
97735+++ b/net/bluetooth/hci_sock.c
97736@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
97737 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
97738 }
97739
97740- len = min_t(unsigned int, len, sizeof(uf));
97741+ len = min((size_t)len, sizeof(uf));
97742 if (copy_from_user(&uf, optval, len)) {
97743 err = -EFAULT;
97744 break;
97745diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
97746index b0ad2c7..96f6a5e 100644
97747--- a/net/bluetooth/l2cap_core.c
97748+++ b/net/bluetooth/l2cap_core.c
97749@@ -3740,8 +3740,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
97750 break;
97751
97752 case L2CAP_CONF_RFC:
97753- if (olen == sizeof(rfc))
97754- memcpy(&rfc, (void *)val, olen);
97755+ if (olen != sizeof(rfc))
97756+ break;
97757+
97758+ memcpy(&rfc, (void *)val, olen);
97759
97760 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
97761 rfc.mode != chan->mode)
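
The l2cap_core.c hunk tightens option parsing: rfc is only filled in when the received option length matches sizeof(rfc) exactly, where the old code silently skipped the memcpy on a short option and then acted on uninitialized stack contents. The validate-then-copy pattern in userspace form (the struct layout is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rfc_opt { uint8_t mode; uint8_t txwin; uint16_t mtu; };

static int parse_rfc(const void *val, size_t olen, struct rfc_opt *out)
{
	if (olen != sizeof(*out))
		return -1;      /* reject instead of consuming stack garbage */
	memcpy(out, val, olen);
	return 0;
}

int main(void)
{
	uint8_t wire[4] = { 3, 63, 0x00, 0x04 };
	struct rfc_opt rfc;

	printf("%d\n", parse_rfc(wire, sizeof(wire), &rfc)); /* 0 */
	printf("%d\n", parse_rfc(wire, 2, &rfc));            /* -1 */
	return 0;
}
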
97762diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
97763index d58f76b..b69600a 100644
97764--- a/net/bluetooth/l2cap_sock.c
97765+++ b/net/bluetooth/l2cap_sock.c
97766@@ -625,7 +625,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
97767 struct sock *sk = sock->sk;
97768 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
97769 struct l2cap_options opts;
97770- int len, err = 0;
97771+ int err = 0;
97772+ size_t len = optlen;
97773 u32 opt;
97774
97775 BT_DBG("sk %p", sk);
97776@@ -652,7 +653,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
97777 opts.max_tx = chan->max_tx;
97778 opts.txwin_size = chan->tx_win;
97779
97780- len = min_t(unsigned int, sizeof(opts), optlen);
97781+ len = min(sizeof(opts), len);
97782 if (copy_from_user((char *) &opts, optval, len)) {
97783 err = -EFAULT;
97784 break;
97785@@ -734,7 +735,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
97786 struct bt_security sec;
97787 struct bt_power pwr;
97788 struct l2cap_conn *conn;
97789- int len, err = 0;
97790+ int err = 0;
97791+ size_t len = optlen;
97792 u32 opt;
97793
97794 BT_DBG("sk %p", sk);
97795@@ -757,7 +759,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
97796
97797 sec.level = BT_SECURITY_LOW;
97798
97799- len = min_t(unsigned int, sizeof(sec), optlen);
97800+ len = min(sizeof(sec), len);
97801 if (copy_from_user((char *) &sec, optval, len)) {
97802 err = -EFAULT;
97803 break;
97804@@ -857,7 +859,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
97805
97806 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
97807
97808- len = min_t(unsigned int, sizeof(pwr), optlen);
97809+ len = min(sizeof(pwr), len);
97810 if (copy_from_user((char *) &pwr, optval, len)) {
97811 err = -EFAULT;
97812 break;
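
These Bluetooth setsockopt hunks move length bookkeeping from int plus min_t(unsigned int, ...) casts to a size_t seeded with optlen, so a user-supplied length is never laundered through a signed type. Why that matters, in a short userspace demo:

#include <stddef.h>
#include <stdio.h>

static void copy_len(size_t n)
{
	printf("would copy %zu bytes\n", n);
}

int main(void)
{
	int optlen = -1;                /* hostile or corrupted length */

	copy_len((size_t)optlen);       /* huge: 2^64 - 1 on LP64 */

	size_t len = (size_t)optlen;    /* unsigned from the start... */
	size_t want = 16;
	if (len > want)                 /* ...so min()-style clamping works */
		len = want;
	copy_len(len);                  /* 16 bytes, bounded by the struct */
	return 0;
}
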
97813diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
97814index 3c2d3e4..884855a 100644
97815--- a/net/bluetooth/rfcomm/sock.c
97816+++ b/net/bluetooth/rfcomm/sock.c
97817@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
97818 struct sock *sk = sock->sk;
97819 struct bt_security sec;
97820 int err = 0;
97821- size_t len;
97822+ size_t len = optlen;
97823 u32 opt;
97824
97825 BT_DBG("sk %p", sk);
97826@@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
97827
97828 sec.level = BT_SECURITY_LOW;
97829
97830- len = min_t(unsigned int, sizeof(sec), optlen);
97831+ len = min(sizeof(sec), len);
97832 if (copy_from_user((char *) &sec, optval, len)) {
97833 err = -EFAULT;
97834 break;
97835diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
97836index f9c0980a..fcbbfeb 100644
97837--- a/net/bluetooth/rfcomm/tty.c
97838+++ b/net/bluetooth/rfcomm/tty.c
97839@@ -717,7 +717,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
97840 BT_DBG("tty %p id %d", tty, tty->index);
97841
97842 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
97843- dev->channel, dev->port.count);
97844+ dev->channel, atomic_read(&dev->port.count));
97845
97846 err = tty_port_open(&dev->port, tty, filp);
97847 if (err)
97848@@ -740,7 +740,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
97849 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
97850
97851 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
97852- dev->port.count);
97853+ atomic_read(&dev->port.count));
97854
97855 tty_port_close(&dev->port, tty, filp);
97856 }
97857diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
97858index 1059ed3..d70846a 100644
97859--- a/net/bridge/netfilter/ebtables.c
97860+++ b/net/bridge/netfilter/ebtables.c
97861@@ -1524,7 +1524,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97862 tmp.valid_hooks = t->table->valid_hooks;
97863 }
97864 mutex_unlock(&ebt_mutex);
97865- if (copy_to_user(user, &tmp, *len) != 0) {
97866+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
97867 BUGPRINT("c2u Didn't work\n");
97868 ret = -EFAULT;
97869 break;
97870@@ -2330,7 +2330,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
97871 goto out;
97872 tmp.valid_hooks = t->valid_hooks;
97873
97874- if (copy_to_user(user, &tmp, *len) != 0) {
97875+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
97876 ret = -EFAULT;
97877 break;
97878 }
97879@@ -2341,7 +2341,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
97880 tmp.entries_size = t->table->entries_size;
97881 tmp.valid_hooks = t->table->valid_hooks;
97882
97883- if (copy_to_user(user, &tmp, *len) != 0) {
97884+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
97885 ret = -EFAULT;
97886 break;
97887 }
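
All three ebtables hunks close the same stack infoleak: the handlers copy a caller-chosen *len bytes out of a fixed-size on-stack struct, so any *len above sizeof(tmp) used to leak adjacent kernel stack. A sketch of the bound, with memcpy standing in for copy_to_user:

#include <stdio.h>
#include <string.h>

struct reply { char name[8]; int nentries; };

static int copy_reply(void *user, size_t user_len, const struct reply *tmp)
{
	if (user_len > sizeof(*tmp))
		return -1;             /* was: infoleak past the struct */
	memcpy(user, tmp, user_len);
	return 0;
}

int main(void)
{
	struct reply tmp = { "filter", 3 };
	char buf[64];

	printf("%d\n", copy_reply(buf, sizeof(tmp), &tmp)); /* 0 */
	printf("%d\n", copy_reply(buf, sizeof(buf), &tmp)); /* -1 */
	return 0;
}
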
97888diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
97889index 0f45522..dab651f 100644
97890--- a/net/caif/cfctrl.c
97891+++ b/net/caif/cfctrl.c
97892@@ -10,6 +10,7 @@
97893 #include <linux/spinlock.h>
97894 #include <linux/slab.h>
97895 #include <linux/pkt_sched.h>
97896+#include <linux/sched.h>
97897 #include <net/caif/caif_layer.h>
97898 #include <net/caif/cfpkt.h>
97899 #include <net/caif/cfctrl.h>
97900@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
97901 memset(&dev_info, 0, sizeof(dev_info));
97902 dev_info.id = 0xff;
97903 cfsrvl_init(&this->serv, 0, &dev_info, false);
97904- atomic_set(&this->req_seq_no, 1);
97905- atomic_set(&this->rsp_seq_no, 1);
97906+ atomic_set_unchecked(&this->req_seq_no, 1);
97907+ atomic_set_unchecked(&this->rsp_seq_no, 1);
97908 this->serv.layer.receive = cfctrl_recv;
97909 sprintf(this->serv.layer.name, "ctrl");
97910 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
97911@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
97912 struct cfctrl_request_info *req)
97913 {
97914 spin_lock_bh(&ctrl->info_list_lock);
97915- atomic_inc(&ctrl->req_seq_no);
97916- req->sequence_no = atomic_read(&ctrl->req_seq_no);
97917+ atomic_inc_unchecked(&ctrl->req_seq_no);
97918+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
97919 list_add_tail(&req->list, &ctrl->list);
97920 spin_unlock_bh(&ctrl->info_list_lock);
97921 }
97922@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
97923 if (p != first)
97924 pr_warn("Requests are not received in order\n");
97925
97926- atomic_set(&ctrl->rsp_seq_no,
97927+ atomic_set_unchecked(&ctrl->rsp_seq_no,
97928 p->sequence_no);
97929 list_del(&p->list);
97930 goto out;
97931diff --git a/net/can/af_can.c b/net/can/af_can.c
97932index a27f8aa..67174a3 100644
97933--- a/net/can/af_can.c
97934+++ b/net/can/af_can.c
97935@@ -863,7 +863,7 @@ static const struct net_proto_family can_family_ops = {
97936 };
97937
97938 /* notifier block for netdevice event */
97939-static struct notifier_block can_netdev_notifier __read_mostly = {
97940+static struct notifier_block can_netdev_notifier = {
97941 .notifier_call = can_notifier,
97942 };
97943
97944diff --git a/net/can/bcm.c b/net/can/bcm.c
97945index dcb75c0..24b1b43 100644
97946--- a/net/can/bcm.c
97947+++ b/net/can/bcm.c
97948@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
97949 }
97950
97951 /* create /proc/net/can-bcm directory */
97952- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
97953+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
97954 return 0;
97955 }
97956
97957diff --git a/net/can/gw.c b/net/can/gw.c
97958index ac31891..4799c17 100644
97959--- a/net/can/gw.c
97960+++ b/net/can/gw.c
97961@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
97962 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
97963
97964 static HLIST_HEAD(cgw_list);
97965-static struct notifier_block notifier;
97966
97967 static struct kmem_cache *cgw_cache __read_mostly;
97968
97969@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
97970 return err;
97971 }
97972
97973+static struct notifier_block notifier = {
97974+ .notifier_call = cgw_notifier
97975+};
97976+
97977 static __init int cgw_module_init(void)
97978 {
97979 /* sanitize given module parameter */
97980@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
97981 return -ENOMEM;
97982
97983 /* set notifier */
97984- notifier.notifier_call = cgw_notifier;
97985 register_netdevice_notifier(&notifier);
97986
97987 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
97988diff --git a/net/can/proc.c b/net/can/proc.c
97989index b543470..d2ddae2 100644
97990--- a/net/can/proc.c
97991+++ b/net/can/proc.c
97992@@ -468,7 +468,7 @@ static void can_remove_proc_readentry(const char *name)
97993 void can_init_proc(void)
97994 {
97995 /* create /proc/net/can directory */
97996- can_dir = proc_mkdir("can", init_net.proc_net);
97997+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
97998
97999 if (!can_dir) {
98000 printk(KERN_INFO "can: failed to create /proc/net/can . "
98001diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
98002index 988721a..947846d 100644
98003--- a/net/ceph/messenger.c
98004+++ b/net/ceph/messenger.c
98005@@ -187,7 +187,7 @@ static void con_fault(struct ceph_connection *con);
98006 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
98007
98008 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
98009-static atomic_t addr_str_seq = ATOMIC_INIT(0);
98010+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
98011
98012 static struct page *zero_page; /* used in certain error cases */
98013
98014@@ -198,7 +198,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
98015 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
98016 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
98017
98018- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
98019+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
98020 s = addr_str[i];
98021
98022 switch (ss->ss_family) {
98023diff --git a/net/compat.c b/net/compat.c
98024index f50161f..94fa415 100644
98025--- a/net/compat.c
98026+++ b/net/compat.c
98027@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
98028 return -EFAULT;
98029 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
98030 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
98031- kmsg->msg_name = compat_ptr(tmp1);
98032- kmsg->msg_iov = compat_ptr(tmp2);
98033- kmsg->msg_control = compat_ptr(tmp3);
98034+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
98035+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
98036+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
98037 return 0;
98038 }
98039
98040@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
98041
98042 if (kern_msg->msg_namelen) {
98043 if (mode == VERIFY_READ) {
98044- int err = move_addr_to_kernel(kern_msg->msg_name,
98045+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
98046 kern_msg->msg_namelen,
98047 kern_address);
98048 if (err < 0)
98049@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
98050 kern_msg->msg_name = NULL;
98051
98052 tot_len = iov_from_user_compat_to_kern(kern_iov,
98053- (struct compat_iovec __user *)kern_msg->msg_iov,
98054+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
98055 kern_msg->msg_iovlen);
98056 if (tot_len >= 0)
98057 kern_msg->msg_iov = kern_iov;
98058@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
98059
98060 #define CMSG_COMPAT_FIRSTHDR(msg) \
98061 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
98062- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
98063+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
98064 (struct compat_cmsghdr __user *)NULL)
98065
98066 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
98067 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
98068 (ucmlen) <= (unsigned long) \
98069 ((mhdr)->msg_controllen - \
98070- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
98071+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
98072
98073 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
98074 struct compat_cmsghdr __user *cmsg, int cmsg_len)
98075 {
98076 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
98077- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
98078+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
98079 msg->msg_controllen)
98080 return NULL;
98081 return (struct compat_cmsghdr __user *)ptr;
98082@@ -222,7 +222,7 @@ Efault:
98083
98084 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
98085 {
98086- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
98087+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
98088 struct compat_cmsghdr cmhdr;
98089 struct compat_timeval ctv;
98090 struct compat_timespec cts[3];
98091@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
98092
98093 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
98094 {
98095- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
98096+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
98097 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
98098 int fdnum = scm->fp->count;
98099 struct file **fp = scm->fp->fp;
98100@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
98101 return -EFAULT;
98102 old_fs = get_fs();
98103 set_fs(KERNEL_DS);
98104- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
98105+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
98106 set_fs(old_fs);
98107
98108 return err;
98109@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
98110 len = sizeof(ktime);
98111 old_fs = get_fs();
98112 set_fs(KERNEL_DS);
98113- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
98114+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
98115 set_fs(old_fs);
98116
98117 if (!err) {
98118@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
98119 case MCAST_JOIN_GROUP:
98120 case MCAST_LEAVE_GROUP:
98121 {
98122- struct compat_group_req __user *gr32 = (void *)optval;
98123+ struct compat_group_req __user *gr32 = (void __user *)optval;
98124 struct group_req __user *kgr =
98125 compat_alloc_user_space(sizeof(struct group_req));
98126 u32 interface;
98127@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
98128 case MCAST_BLOCK_SOURCE:
98129 case MCAST_UNBLOCK_SOURCE:
98130 {
98131- struct compat_group_source_req __user *gsr32 = (void *)optval;
98132+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
98133 struct group_source_req __user *kgsr = compat_alloc_user_space(
98134 sizeof(struct group_source_req));
98135 u32 interface;
98136@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
98137 }
98138 case MCAST_MSFILTER:
98139 {
98140- struct compat_group_filter __user *gf32 = (void *)optval;
98141+ struct compat_group_filter __user *gf32 = (void __user *)optval;
98142 struct group_filter __user *kgf;
98143 u32 interface, fmode, numsrc;
98144
98145@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
98146 char __user *optval, int __user *optlen,
98147 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
98148 {
98149- struct compat_group_filter __user *gf32 = (void *)optval;
98150+ struct compat_group_filter __user *gf32 = (void __user *)optval;
98151 struct group_filter __user *kgf;
98152 int __user *koptlen;
98153 u32 interface, fmode, numsrc;
98154@@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
98155
98156 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
98157 return -EINVAL;
98158- if (copy_from_user(a, args, nas[call]))
98159+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
98160 return -EFAULT;
98161 a0 = a[0];
98162 a1 = a[1];
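
compat_sys_socketcall copies nas[call] bytes of arguments into a fixed on-stack array; the added bound refuses any table entry larger than the destination, guarding against the size table and the buffer ever drifting apart. The pattern in isolation (a three-entry table stands in for the real one):

#include <stdio.h>
#include <string.h>

#define AL(x) ((x) * sizeof(unsigned long))

static const unsigned char nas[3] = { AL(0), AL(3), AL(6) };

static int fetch_args(unsigned long *a, size_t a_size,
		      const void *uargs, unsigned int call)
{
	if (nas[call] > a_size)
		return -1;             /* would overflow the on-stack array */
	memcpy(a, uargs, nas[call]);   /* stand-in for copy_from_user() */
	return 0;
}

int main(void)
{
	unsigned long a[6], uargs[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	printf("%d\n", fetch_args(a, sizeof(a), uargs, 2)); /* 0 */
	printf("%d\n", fetch_args(a, AL(4), uargs, 2));     /* -1 */
	return 0;
}
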
98163diff --git a/net/core/datagram.c b/net/core/datagram.c
98164index a16ed7b..eb44d17 100644
98165--- a/net/core/datagram.c
98166+++ b/net/core/datagram.c
98167@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
98168 }
98169
98170 kfree_skb(skb);
98171- atomic_inc(&sk->sk_drops);
98172+ atomic_inc_unchecked(&sk->sk_drops);
98173 sk_mem_reclaim_partial(sk);
98174
98175 return err;
98176diff --git a/net/core/dev.c b/net/core/dev.c
98177index fccc195..c8486ab 100644
98178--- a/net/core/dev.c
98179+++ b/net/core/dev.c
98180@@ -1688,14 +1688,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
98181 {
98182 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
98183 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
98184- atomic_long_inc(&dev->rx_dropped);
98185+ atomic_long_inc_unchecked(&dev->rx_dropped);
98186 kfree_skb(skb);
98187 return NET_RX_DROP;
98188 }
98189 }
98190
98191 if (unlikely(!is_skb_forwardable(dev, skb))) {
98192- atomic_long_inc(&dev->rx_dropped);
98193+ atomic_long_inc_unchecked(&dev->rx_dropped);
98194 kfree_skb(skb);
98195 return NET_RX_DROP;
98196 }
98197@@ -2453,7 +2453,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
98198
98199 struct dev_gso_cb {
98200 void (*destructor)(struct sk_buff *skb);
98201-};
98202+} __no_const;
98203
98204 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
98205
98206@@ -3227,7 +3227,7 @@ enqueue:
98207
98208 local_irq_restore(flags);
98209
98210- atomic_long_inc(&skb->dev->rx_dropped);
98211+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
98212 kfree_skb(skb);
98213 return NET_RX_DROP;
98214 }
98215@@ -3308,7 +3308,7 @@ int netif_rx_ni(struct sk_buff *skb)
98216 }
98217 EXPORT_SYMBOL(netif_rx_ni);
98218
98219-static void net_tx_action(struct softirq_action *h)
98220+static __latent_entropy void net_tx_action(void)
98221 {
98222 struct softnet_data *sd = &__get_cpu_var(softnet_data);
98223
98224@@ -3645,7 +3645,7 @@ ncls:
98225 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
98226 } else {
98227 drop:
98228- atomic_long_inc(&skb->dev->rx_dropped);
98229+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
98230 kfree_skb(skb);
98231 /* Jamal, now you will not able to escape explaining
98232 * me how you were going to use this. :-)
98233@@ -4333,7 +4333,7 @@ void netif_napi_del(struct napi_struct *napi)
98234 }
98235 EXPORT_SYMBOL(netif_napi_del);
98236
98237-static void net_rx_action(struct softirq_action *h)
98238+static __latent_entropy void net_rx_action(void)
98239 {
98240 struct softnet_data *sd = &__get_cpu_var(softnet_data);
98241 unsigned long time_limit = jiffies + 2;
98242@@ -6302,7 +6302,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
98243 } else {
98244 netdev_stats_to_stats64(storage, &dev->stats);
98245 }
98246- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
98247+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
98248 return storage;
98249 }
98250 EXPORT_SYMBOL(dev_get_stats);
98251diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
98252index cf999e0..c59a975 100644
98253--- a/net/core/dev_ioctl.c
98254+++ b/net/core/dev_ioctl.c
98255@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
98256 if (no_module && capable(CAP_NET_ADMIN))
98257 no_module = request_module("netdev-%s", name);
98258 if (no_module && capable(CAP_SYS_MODULE)) {
98259+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98260+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
98261+#else
98262 if (!request_module("%s", name))
98263 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
98264 name);
98265+#endif
98266 }
98267 }
98268 EXPORT_SYMBOL(dev_load);
98269diff --git a/net/core/filter.c b/net/core/filter.c
98270index ebce437..9fed9d0 100644
98271--- a/net/core/filter.c
98272+++ b/net/core/filter.c
98273@@ -126,7 +126,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
98274 void *ptr;
98275 u32 A = 0; /* Accumulator */
98276 u32 X = 0; /* Index Register */
98277- u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
98278+ u32 mem[BPF_MEMWORDS] = {}; /* Scratch Memory Store */
98279 u32 tmp;
98280 int k;
98281
98282@@ -292,10 +292,10 @@ load_b:
98283 X = K;
98284 continue;
98285 case BPF_S_LD_MEM:
98286- A = mem[K];
98287+ A = mem[K&15];
98288 continue;
98289 case BPF_S_LDX_MEM:
98290- X = mem[K];
98291+ X = mem[K&15];
98292 continue;
98293 case BPF_S_MISC_TAX:
98294 X = A;
98295@@ -308,10 +308,10 @@ load_b:
98296 case BPF_S_RET_A:
98297 return A;
98298 case BPF_S_ST:
98299- mem[K] = A;
98300+ mem[K&15] = A;
98301 continue;
98302 case BPF_S_STX:
98303- mem[K] = X;
98304+ mem[K&15] = X;
98305 continue;
98306 case BPF_S_ANC_PROTOCOL:
98307 A = ntohs(skb->protocol);
98308@@ -395,9 +395,10 @@ load_b:
98309 continue;
98310 #endif
98311 default:
98312- WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
98313+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
98314 fentry->code, fentry->jt,
98315 fentry->jf, fentry->k);
98316+ BUG();
98317 return 0;
98318 }
98319 }
98320@@ -420,7 +421,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
98321 u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
98322 int pc, ret = 0;
98323
98324- BUILD_BUG_ON(BPF_MEMWORDS > 16);
98325+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
98326 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
98327 if (!masks)
98328 return -ENOMEM;
98329@@ -683,7 +684,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
98330 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
98331 if (!fp)
98332 return -ENOMEM;
98333- memcpy(fp->insns, fprog->filter, fsize);
98334+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
98335
98336 atomic_set(&fp->refcnt, 1);
98337 fp->len = fprog->len;
98338diff --git a/net/core/flow.c b/net/core/flow.c
98339index dfa602c..3103d88 100644
98340--- a/net/core/flow.c
98341+++ b/net/core/flow.c
98342@@ -61,7 +61,7 @@ struct flow_cache {
98343 struct timer_list rnd_timer;
98344 };
98345
98346-atomic_t flow_cache_genid = ATOMIC_INIT(0);
98347+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
98348 EXPORT_SYMBOL(flow_cache_genid);
98349 static struct flow_cache flow_cache_global;
98350 static struct kmem_cache *flow_cachep __read_mostly;
98351@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
98352
98353 static int flow_entry_valid(struct flow_cache_entry *fle)
98354 {
98355- if (atomic_read(&flow_cache_genid) != fle->genid)
98356+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
98357 return 0;
98358 if (fle->object && !fle->object->ops->check(fle->object))
98359 return 0;
98360@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
98361 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
98362 fcp->hash_count++;
98363 }
98364- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
98365+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
98366 flo = fle->object;
98367 if (!flo)
98368 goto ret_object;
98369@@ -279,7 +279,7 @@ nocache:
98370 }
98371 flo = resolver(net, key, family, dir, flo, ctx);
98372 if (fle) {
98373- fle->genid = atomic_read(&flow_cache_genid);
98374+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
98375 if (!IS_ERR(flo))
98376 fle->object = flo;
98377 else
98378diff --git a/net/core/iovec.c b/net/core/iovec.c
98379index b618694..192bbba 100644
98380--- a/net/core/iovec.c
98381+++ b/net/core/iovec.c
98382@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
98383 if (m->msg_namelen) {
98384 if (mode == VERIFY_READ) {
98385 void __user *namep;
98386- namep = (void __user __force *) m->msg_name;
98387+ namep = (void __force_user *) m->msg_name;
98388 err = move_addr_to_kernel(namep, m->msg_namelen,
98389 address);
98390 if (err < 0)
98391@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
98392 }
98393
98394 size = m->msg_iovlen * sizeof(struct iovec);
98395- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
98396+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
98397 return -EFAULT;
98398
98399 m->msg_iov = iov;
98400diff --git a/net/core/neighbour.c b/net/core/neighbour.c
98401index 7d95f69..a6065de 100644
98402--- a/net/core/neighbour.c
98403+++ b/net/core/neighbour.c
98404@@ -2824,7 +2824,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
98405 void __user *buffer, size_t *lenp, loff_t *ppos)
98406 {
98407 int size, ret;
98408- struct ctl_table tmp = *ctl;
98409+ ctl_table_no_const tmp = *ctl;
98410
98411 tmp.extra1 = &zero;
98412 tmp.extra2 = &unres_qlen_max;
98413@@ -2886,7 +2886,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
98414 void __user *buffer,
98415 size_t *lenp, loff_t *ppos)
98416 {
98417- struct ctl_table tmp = *ctl;
98418+ ctl_table_no_const tmp = *ctl;
98419 int ret;
98420
98421 tmp.extra1 = &zero;
98422@@ -3058,11 +3058,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
98423 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
98424 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
98425 } else {
98426+ struct neigh_table *ntable = container_of(p, struct neigh_table, parms);
98427 dev_name_source = "default";
98428- t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
98429- t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
98430- t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
98431- t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
98432+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &ntable->gc_interval;
98433+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &ntable->gc_thresh1;
98434+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &ntable->gc_thresh2;
98435+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &ntable->gc_thresh3;
98436 }
98437
98438 if (handler) {
98439diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
98440index 2bf8329..2eb1423 100644
98441--- a/net/core/net-procfs.c
98442+++ b/net/core/net-procfs.c
98443@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
98444 struct rtnl_link_stats64 temp;
98445 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
98446
98447- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
98448+ if (gr_proc_is_restricted())
98449+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
98450+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
98451+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
98452+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
98453+ else
98454+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
98455 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
98456 dev->name, stats->rx_bytes, stats->rx_packets,
98457 stats->rx_errors,
98458@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
98459 return 0;
98460 }
98461
98462-static const struct seq_operations dev_seq_ops = {
98463+const struct seq_operations dev_seq_ops = {
98464 .start = dev_seq_start,
98465 .next = dev_seq_next,
98466 .stop = dev_seq_stop,
98467@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
98468
98469 static int softnet_seq_open(struct inode *inode, struct file *file)
98470 {
98471- return seq_open(file, &softnet_seq_ops);
98472+ return seq_open_restrict(file, &softnet_seq_ops);
98473 }
98474
98475 static const struct file_operations softnet_seq_fops = {
98476@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
98477 else
98478 seq_printf(seq, "%04x", ntohs(pt->type));
98479
98480+#ifdef CONFIG_GRKERNSEC_HIDESYM
98481+ seq_printf(seq, " %-8s %pf\n",
98482+ pt->dev ? pt->dev->name : "", NULL);
98483+#else
98484 seq_printf(seq, " %-8s %pf\n",
98485 pt->dev ? pt->dev->name : "", pt->func);
98486+#endif
98487 }
98488
98489 return 0;
98490diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
98491index 7c8ffd9..0cb3687 100644
98492--- a/net/core/net_namespace.c
98493+++ b/net/core/net_namespace.c
98494@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
98495 int error;
98496 LIST_HEAD(net_exit_list);
98497
98498- list_add_tail(&ops->list, list);
98499+ pax_list_add_tail((struct list_head *)&ops->list, list);
98500 if (ops->init || (ops->id && ops->size)) {
98501 for_each_net(net) {
98502 error = ops_init(ops, net);
98503@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
98504
98505 out_undo:
98506 /* If I have an error cleanup all namespaces I initialized */
98507- list_del(&ops->list);
98508+ pax_list_del((struct list_head *)&ops->list);
98509 ops_exit_list(ops, &net_exit_list);
98510 ops_free_list(ops, &net_exit_list);
98511 return error;
98512@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
98513 struct net *net;
98514 LIST_HEAD(net_exit_list);
98515
98516- list_del(&ops->list);
98517+ pax_list_del((struct list_head *)&ops->list);
98518 for_each_net(net)
98519 list_add_tail(&net->exit_list, &net_exit_list);
98520 ops_exit_list(ops, &net_exit_list);
98521@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
98522 mutex_lock(&net_mutex);
98523 error = register_pernet_operations(&pernet_list, ops);
98524 if (!error && (first_device == &pernet_list))
98525- first_device = &ops->list;
98526+ first_device = (struct list_head *)&ops->list;
98527 mutex_unlock(&net_mutex);
98528 return error;
98529 }
98530diff --git a/net/core/netpoll.c b/net/core/netpoll.c
98531index df9e6b1..6e68e4e 100644
98532--- a/net/core/netpoll.c
98533+++ b/net/core/netpoll.c
98534@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
98535 struct udphdr *udph;
98536 struct iphdr *iph;
98537 struct ethhdr *eth;
98538- static atomic_t ip_ident;
98539+ static atomic_unchecked_t ip_ident;
98540 struct ipv6hdr *ip6h;
98541
98542 udp_len = len + sizeof(*udph);
98543@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
98544 put_unaligned(0x45, (unsigned char *)iph);
98545 iph->tos = 0;
98546 put_unaligned(htons(ip_len), &(iph->tot_len));
98547- iph->id = htons(atomic_inc_return(&ip_ident));
98548+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
98549 iph->frag_off = 0;
98550 iph->ttl = 64;
98551 iph->protocol = IPPROTO_UDP;
98552diff --git a/net/core/pktgen.c b/net/core/pktgen.c
98553index fdac61c..e5e5b46 100644
98554--- a/net/core/pktgen.c
98555+++ b/net/core/pktgen.c
98556@@ -3719,7 +3719,7 @@ static int __net_init pg_net_init(struct net *net)
98557 pn->net = net;
98558 INIT_LIST_HEAD(&pn->pktgen_threads);
98559 pn->pktgen_exiting = false;
98560- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
98561+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
98562 if (!pn->proc_dir) {
98563 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
98564 return -ENODEV;
98565diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
98566index 83b9d6a..cff1ce7 100644
98567--- a/net/core/rtnetlink.c
98568+++ b/net/core/rtnetlink.c
98569@@ -58,7 +58,7 @@ struct rtnl_link {
98570 rtnl_doit_func doit;
98571 rtnl_dumpit_func dumpit;
98572 rtnl_calcit_func calcit;
98573-};
98574+} __no_const;
98575
98576 static DEFINE_MUTEX(rtnl_mutex);
98577
98578@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
98579 if (rtnl_link_ops_get(ops->kind))
98580 return -EEXIST;
98581
98582- if (!ops->dellink)
98583- ops->dellink = unregister_netdevice_queue;
98584+ if (!ops->dellink) {
98585+ pax_open_kernel();
98586+ *(void **)&ops->dellink = unregister_netdevice_queue;
98587+ pax_close_kernel();
98588+ }
98589
98590- list_add_tail(&ops->list, &link_ops);
98591+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
98592 return 0;
98593 }
98594 EXPORT_SYMBOL_GPL(__rtnl_link_register);
98595@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
98596 for_each_net(net) {
98597 __rtnl_kill_links(net, ops);
98598 }
98599- list_del(&ops->list);
98600+ pax_list_del((struct list_head *)&ops->list);
98601 }
98602 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
98603
98604diff --git a/net/core/scm.c b/net/core/scm.c
98605index b442e7e..6f5b5a2 100644
98606--- a/net/core/scm.c
98607+++ b/net/core/scm.c
98608@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
98609 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
98610 {
98611 struct cmsghdr __user *cm
98612- = (__force struct cmsghdr __user *)msg->msg_control;
98613+ = (struct cmsghdr __force_user *)msg->msg_control;
98614 struct cmsghdr cmhdr;
98615 int cmlen = CMSG_LEN(len);
98616 int err;
98617@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
98618 err = -EFAULT;
98619 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
98620 goto out;
98621- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
98622+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
98623 goto out;
98624 cmlen = CMSG_SPACE(len);
98625 if (msg->msg_controllen < cmlen)
98626@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
98627 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
98628 {
98629 struct cmsghdr __user *cm
98630- = (__force struct cmsghdr __user*)msg->msg_control;
98631+ = (struct cmsghdr __force_user *)msg->msg_control;
98632
98633 int fdmax = 0;
98634 int fdnum = scm->fp->count;
98635@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
98636 if (fdnum < fdmax)
98637 fdmax = fdnum;
98638
98639- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
98640+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
98641 i++, cmfptr++)
98642 {
98643 struct socket *sock;
98644diff --git a/net/core/skbuff.c b/net/core/skbuff.c
98645index e5ae776e..15c90cb 100644
98646--- a/net/core/skbuff.c
98647+++ b/net/core/skbuff.c
98648@@ -2003,7 +2003,7 @@ EXPORT_SYMBOL(__skb_checksum);
98649 __wsum skb_checksum(const struct sk_buff *skb, int offset,
98650 int len, __wsum csum)
98651 {
98652- const struct skb_checksum_ops ops = {
98653+ static const struct skb_checksum_ops ops = {
98654 .update = csum_partial_ext,
98655 .combine = csum_block_add_ext,
98656 };
98657@@ -3220,13 +3220,15 @@ void __init skb_init(void)
98658 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
98659 sizeof(struct sk_buff),
98660 0,
98661- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
98662+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
98663+ SLAB_NO_SANITIZE,
98664 NULL);
98665 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
98666 (2*sizeof(struct sk_buff)) +
98667 sizeof(atomic_t),
98668 0,
98669- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
98670+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
98671+ SLAB_NO_SANITIZE,
98672 NULL);
98673 }
98674
98675diff --git a/net/core/sock.c b/net/core/sock.c
98676index c0fc6bd..51d8326 100644
98677--- a/net/core/sock.c
98678+++ b/net/core/sock.c
98679@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
98680 struct sk_buff_head *list = &sk->sk_receive_queue;
98681
98682 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
98683- atomic_inc(&sk->sk_drops);
98684+ atomic_inc_unchecked(&sk->sk_drops);
98685 trace_sock_rcvqueue_full(sk, skb);
98686 return -ENOMEM;
98687 }
98688@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
98689 return err;
98690
98691 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
98692- atomic_inc(&sk->sk_drops);
98693+ atomic_inc_unchecked(&sk->sk_drops);
98694 return -ENOBUFS;
98695 }
98696
98697@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
98698 skb_dst_force(skb);
98699
98700 spin_lock_irqsave(&list->lock, flags);
98701- skb->dropcount = atomic_read(&sk->sk_drops);
98702+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
98703 __skb_queue_tail(list, skb);
98704 spin_unlock_irqrestore(&list->lock, flags);
98705
98706@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
98707 skb->dev = NULL;
98708
98709 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
98710- atomic_inc(&sk->sk_drops);
98711+ atomic_inc_unchecked(&sk->sk_drops);
98712 goto discard_and_relse;
98713 }
98714 if (nested)
98715@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
98716 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
98717 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
98718 bh_unlock_sock(sk);
98719- atomic_inc(&sk->sk_drops);
98720+ atomic_inc_unchecked(&sk->sk_drops);
98721 goto discard_and_relse;
98722 }
98723
98724@@ -949,12 +949,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
98725 struct timeval tm;
98726 } v;
98727
98728- int lv = sizeof(int);
98729- int len;
98730+ unsigned int lv = sizeof(int);
98731+ unsigned int len;
98732
98733 if (get_user(len, optlen))
98734 return -EFAULT;
98735- if (len < 0)
98736+ if (len > INT_MAX)
98737 return -EINVAL;
98738
98739 memset(&v, 0, sizeof(v));
98740@@ -1106,11 +1106,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
98741
98742 case SO_PEERNAME:
98743 {
98744- char address[128];
98745+ char address[_K_SS_MAXSIZE];
98746
98747 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
98748 return -ENOTCONN;
98749- if (lv < len)
98750+ if (lv < len || sizeof address < len)
98751 return -EINVAL;
98752 if (copy_to_user(optval, address, len))
98753 return -EFAULT;
98754@@ -1191,7 +1191,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
98755
98756 if (len > lv)
98757 len = lv;
98758- if (copy_to_user(optval, &v, len))
98759+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
98760 return -EFAULT;
98761 lenout:
98762 if (put_user(len, optlen))
98763@@ -2326,7 +2326,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
98764 */
98765 smp_wmb();
98766 atomic_set(&sk->sk_refcnt, 1);
98767- atomic_set(&sk->sk_drops, 0);
98768+ atomic_set_unchecked(&sk->sk_drops, 0);
98769 }
98770 EXPORT_SYMBOL(sock_init_data);
98771
98772@@ -2454,6 +2454,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
98773 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
98774 int level, int type)
98775 {
98776+ struct sock_extended_err ee;
98777 struct sock_exterr_skb *serr;
98778 struct sk_buff *skb, *skb2;
98779 int copied, err;
98780@@ -2475,7 +2476,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
98781 sock_recv_timestamp(msg, sk, skb);
98782
98783 serr = SKB_EXT_ERR(skb);
98784- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
98785+ ee = serr->ee;
98786+ put_cmsg(msg, level, type, sizeof ee, &ee);
98787
98788 msg->msg_flags |= MSG_ERRQUEUE;
98789 err = copied;
98790diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
98791index 6a7fae2..d7c22e6 100644
98792--- a/net/core/sock_diag.c
98793+++ b/net/core/sock_diag.c
98794@@ -9,26 +9,33 @@
98795 #include <linux/inet_diag.h>
98796 #include <linux/sock_diag.h>
98797
98798-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
98799+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
98800 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
98801 static DEFINE_MUTEX(sock_diag_table_mutex);
98802
98803 int sock_diag_check_cookie(void *sk, __u32 *cookie)
98804 {
98805+#ifndef CONFIG_GRKERNSEC_HIDESYM
98806 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
98807 cookie[1] != INET_DIAG_NOCOOKIE) &&
98808 ((u32)(unsigned long)sk != cookie[0] ||
98809 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
98810 return -ESTALE;
98811 else
98812+#endif
98813 return 0;
98814 }
98815 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
98816
98817 void sock_diag_save_cookie(void *sk, __u32 *cookie)
98818 {
98819+#ifdef CONFIG_GRKERNSEC_HIDESYM
98820+ cookie[0] = 0;
98821+ cookie[1] = 0;
98822+#else
98823 cookie[0] = (u32)(unsigned long)sk;
98824 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
98825+#endif
98826 }
98827 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
98828
98829@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
98830 mutex_lock(&sock_diag_table_mutex);
98831 if (sock_diag_handlers[hndl->family])
98832 err = -EBUSY;
98833- else
98834+ else {
98835+ pax_open_kernel();
98836 sock_diag_handlers[hndl->family] = hndl;
98837+ pax_close_kernel();
98838+ }
98839 mutex_unlock(&sock_diag_table_mutex);
98840
98841 return err;
98842@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
98843
98844 mutex_lock(&sock_diag_table_mutex);
98845 BUG_ON(sock_diag_handlers[family] != hnld);
98846+ pax_open_kernel();
98847 sock_diag_handlers[family] = NULL;
98848+ pax_close_kernel();
98849 mutex_unlock(&sock_diag_table_mutex);
98850 }
98851 EXPORT_SYMBOL_GPL(sock_diag_unregister);
98852diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
98853index cf9cd13..8b56af3 100644
98854--- a/net/core/sysctl_net_core.c
98855+++ b/net/core/sysctl_net_core.c
98856@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
98857 {
98858 unsigned int orig_size, size;
98859 int ret, i;
98860- struct ctl_table tmp = {
98861+ ctl_table_no_const tmp = {
98862 .data = &size,
98863 .maxlen = sizeof(size),
98864 .mode = table->mode
98865@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
98866 void __user *buffer, size_t *lenp, loff_t *ppos)
98867 {
98868 char id[IFNAMSIZ];
98869- struct ctl_table tbl = {
98870+ ctl_table_no_const tbl = {
98871 .data = id,
98872 .maxlen = IFNAMSIZ,
98873 };
98874@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
98875
98876 static __net_init int sysctl_core_net_init(struct net *net)
98877 {
98878- struct ctl_table *tbl;
98879+ ctl_table_no_const *tbl = NULL;
98880
98881 net->core.sysctl_somaxconn = SOMAXCONN;
98882
98883- tbl = netns_core_table;
98884 if (!net_eq(net, &init_net)) {
98885- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
98886+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
98887 if (tbl == NULL)
98888 goto err_dup;
98889
98890@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
98891 if (net->user_ns != &init_user_ns) {
98892 tbl[0].procname = NULL;
98893 }
98894- }
98895-
98896- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
98897+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
98898+ } else
98899+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
98900 if (net->core.sysctl_hdr == NULL)
98901 goto err_reg;
98902
98903 return 0;
98904
98905 err_reg:
98906- if (tbl != netns_core_table)
98907- kfree(tbl);
98908+ kfree(tbl);
98909 err_dup:
98910 return -ENOMEM;
98911 }
98912@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
98913 kfree(tbl);
98914 }
98915
98916-static __net_initdata struct pernet_operations sysctl_core_ops = {
98917+static __net_initconst struct pernet_operations sysctl_core_ops = {
98918 .init = sysctl_core_net_init,
98919 .exit = sysctl_core_net_exit,
98920 };
98921diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
98922index 4c04848..f575934 100644
98923--- a/net/decnet/af_decnet.c
98924+++ b/net/decnet/af_decnet.c
98925@@ -465,6 +465,7 @@ static struct proto dn_proto = {
98926 .sysctl_rmem = sysctl_decnet_rmem,
98927 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
98928 .obj_size = sizeof(struct dn_sock),
98929+ .slab_flags = SLAB_USERCOPY,
98930 };
98931
98932 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
98933diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
98934index a603823..a36ee0b 100644
98935--- a/net/decnet/dn_dev.c
98936+++ b/net/decnet/dn_dev.c
98937@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
98938 .extra1 = &min_t3,
98939 .extra2 = &max_t3
98940 },
98941- {0}
98942+ { }
98943 },
98944 };
98945
98946diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
98947index 5325b54..a0d4d69 100644
98948--- a/net/decnet/sysctl_net_decnet.c
98949+++ b/net/decnet/sysctl_net_decnet.c
98950@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
98951
98952 if (len > *lenp) len = *lenp;
98953
98954- if (copy_to_user(buffer, addr, len))
98955+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
98956 return -EFAULT;
98957
98958 *lenp = len;
98959@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
98960
98961 if (len > *lenp) len = *lenp;
98962
98963- if (copy_to_user(buffer, devname, len))
98964+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
98965 return -EFAULT;
98966
98967 *lenp = len;
98968diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
98969index 1846c1f..96d4a9f 100644
98970--- a/net/ieee802154/dgram.c
98971+++ b/net/ieee802154/dgram.c
98972@@ -313,8 +313,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
98973 if (saddr) {
98974 saddr->family = AF_IEEE802154;
98975 saddr->addr = mac_cb(skb)->sa;
98976+ }
98977+ if (addr_len)
98978 *addr_len = sizeof(*saddr);
98979- }
98980
98981 if (flags & MSG_TRUNC)
98982 copied = skb->len;
98983diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
98984index 19ab78a..bf575c9 100644
98985--- a/net/ipv4/af_inet.c
98986+++ b/net/ipv4/af_inet.c
98987@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
98988
98989 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
98990
98991- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
98992- if (!sysctl_local_reserved_ports)
98993- goto out;
98994-
98995 rc = proto_register(&tcp_prot, 1);
98996 if (rc)
98997- goto out_free_reserved_ports;
98998+ goto out;
98999
99000 rc = proto_register(&udp_prot, 1);
99001 if (rc)
99002@@ -1816,8 +1812,6 @@ out_unregister_udp_proto:
99003 proto_unregister(&udp_prot);
99004 out_unregister_tcp_proto:
99005 proto_unregister(&tcp_prot);
99006-out_free_reserved_ports:
99007- kfree(sysctl_local_reserved_ports);
99008 goto out;
99009 }
99010
99011diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
99012index bdbf68b..deb4759 100644
99013--- a/net/ipv4/devinet.c
99014+++ b/net/ipv4/devinet.c
99015@@ -1543,7 +1543,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
99016 idx = 0;
99017 head = &net->dev_index_head[h];
99018 rcu_read_lock();
99019- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
99020+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
99021 net->dev_base_seq;
99022 hlist_for_each_entry_rcu(dev, head, index_hlist) {
99023 if (idx < s_idx)
99024@@ -1861,7 +1861,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
99025 idx = 0;
99026 head = &net->dev_index_head[h];
99027 rcu_read_lock();
99028- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
99029+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
99030 net->dev_base_seq;
99031 hlist_for_each_entry_rcu(dev, head, index_hlist) {
99032 if (idx < s_idx)
99033@@ -2096,7 +2096,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
99034 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
99035 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
99036
99037-static struct devinet_sysctl_table {
99038+static const struct devinet_sysctl_table {
99039 struct ctl_table_header *sysctl_header;
99040 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
99041 } devinet_sysctl = {
99042@@ -2218,7 +2218,7 @@ static __net_init int devinet_init_net(struct net *net)
99043 int err;
99044 struct ipv4_devconf *all, *dflt;
99045 #ifdef CONFIG_SYSCTL
99046- struct ctl_table *tbl = ctl_forward_entry;
99047+ ctl_table_no_const *tbl = NULL;
99048 struct ctl_table_header *forw_hdr;
99049 #endif
99050
99051@@ -2236,7 +2236,7 @@ static __net_init int devinet_init_net(struct net *net)
99052 goto err_alloc_dflt;
99053
99054 #ifdef CONFIG_SYSCTL
99055- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
99056+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
99057 if (tbl == NULL)
99058 goto err_alloc_ctl;
99059
99060@@ -2256,7 +2256,10 @@ static __net_init int devinet_init_net(struct net *net)
99061 goto err_reg_dflt;
99062
99063 err = -ENOMEM;
99064- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
99065+ if (!net_eq(net, &init_net))
99066+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
99067+ else
99068+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
99069 if (forw_hdr == NULL)
99070 goto err_reg_ctl;
99071 net->ipv4.forw_hdr = forw_hdr;
99072@@ -2272,8 +2275,7 @@ err_reg_ctl:
99073 err_reg_dflt:
99074 __devinet_sysctl_unregister(all);
99075 err_reg_all:
99076- if (tbl != ctl_forward_entry)
99077- kfree(tbl);
99078+ kfree(tbl);
99079 err_alloc_ctl:
99080 #endif
99081 if (dflt != &ipv4_devconf_dflt)
99082diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
99083index c7539e2..b455e51 100644
99084--- a/net/ipv4/fib_frontend.c
99085+++ b/net/ipv4/fib_frontend.c
99086@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
99087 #ifdef CONFIG_IP_ROUTE_MULTIPATH
99088 fib_sync_up(dev);
99089 #endif
99090- atomic_inc(&net->ipv4.dev_addr_genid);
99091+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
99092 rt_cache_flush(dev_net(dev));
99093 break;
99094 case NETDEV_DOWN:
99095 fib_del_ifaddr(ifa, NULL);
99096- atomic_inc(&net->ipv4.dev_addr_genid);
99097+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
99098 if (ifa->ifa_dev->ifa_list == NULL) {
99099 /* Last address was deleted from this interface.
99100 * Disable IP.
99101@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
99102 #ifdef CONFIG_IP_ROUTE_MULTIPATH
99103 fib_sync_up(dev);
99104 #endif
99105- atomic_inc(&net->ipv4.dev_addr_genid);
99106+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
99107 rt_cache_flush(net);
99108 break;
99109 case NETDEV_DOWN:
99110diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
99111index 9d43468..ffa28cc 100644
99112--- a/net/ipv4/fib_semantics.c
99113+++ b/net/ipv4/fib_semantics.c
99114@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
99115 nh->nh_saddr = inet_select_addr(nh->nh_dev,
99116 nh->nh_gw,
99117 nh->nh_parent->fib_scope);
99118- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
99119+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
99120
99121 return nh->nh_saddr;
99122 }
99123diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
99124index 0d1e2cb..4501a2c 100644
99125--- a/net/ipv4/inet_connection_sock.c
99126+++ b/net/ipv4/inet_connection_sock.c
99127@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
99128 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
99129 #endif
99130
99131-unsigned long *sysctl_local_reserved_ports;
99132+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
99133 EXPORT_SYMBOL(sysctl_local_reserved_ports);
99134
99135 void inet_get_local_port_range(struct net *net, int *low, int *high)
99136diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
99137index 8b9cf27..0d8d592 100644
99138--- a/net/ipv4/inet_hashtables.c
99139+++ b/net/ipv4/inet_hashtables.c
99140@@ -18,6 +18,7 @@
99141 #include <linux/sched.h>
99142 #include <linux/slab.h>
99143 #include <linux/wait.h>
99144+#include <linux/security.h>
99145
99146 #include <net/inet_connection_sock.h>
99147 #include <net/inet_hashtables.h>
99148@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
99149 return inet_ehashfn(net, laddr, lport, faddr, fport);
99150 }
99151
99152+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
99153+
99154 /*
99155 * Allocate and initialize a new local port bind bucket.
99156 * The bindhash mutex for snum's hash chain must be held here.
99157@@ -554,6 +557,8 @@ ok:
99158 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
99159 spin_unlock(&head->lock);
99160
99161+ gr_update_task_in_ip_table(current, inet_sk(sk));
99162+
99163 if (tw) {
99164 inet_twsk_deschedule(tw, death_row);
99165 while (twrefcnt) {
99166diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
99167index 48f4244..f56d83a 100644
99168--- a/net/ipv4/inetpeer.c
99169+++ b/net/ipv4/inetpeer.c
99170@@ -496,8 +496,8 @@ relookup:
99171 if (p) {
99172 p->daddr = *daddr;
99173 atomic_set(&p->refcnt, 1);
99174- atomic_set(&p->rid, 0);
99175- atomic_set(&p->ip_id_count,
99176+ atomic_set_unchecked(&p->rid, 0);
99177+ atomic_set_unchecked(&p->ip_id_count,
99178 (daddr->family == AF_INET) ?
99179 secure_ip_id(daddr->addr.a4) :
99180 secure_ipv6_id(daddr->addr.a6));
99181diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
99182index c10a3ce..dd71f84 100644
99183--- a/net/ipv4/ip_fragment.c
99184+++ b/net/ipv4/ip_fragment.c
99185@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
99186 return 0;
99187
99188 start = qp->rid;
99189- end = atomic_inc_return(&peer->rid);
99190+ end = atomic_inc_return_unchecked(&peer->rid);
99191 qp->rid = end;
99192
99193 rc = qp->q.fragments && (end - start) > max;
99194@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
99195
99196 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
99197 {
99198- struct ctl_table *table;
99199+ ctl_table_no_const *table = NULL;
99200 struct ctl_table_header *hdr;
99201
99202- table = ip4_frags_ns_ctl_table;
99203 if (!net_eq(net, &init_net)) {
99204- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
99205+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
99206 if (table == NULL)
99207 goto err_alloc;
99208
99209@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
99210 /* Don't export sysctls to unprivileged users */
99211 if (net->user_ns != &init_user_ns)
99212 table[0].procname = NULL;
99213- }
99214+ hdr = register_net_sysctl(net, "net/ipv4", table);
99215+ } else
99216+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
99217
99218- hdr = register_net_sysctl(net, "net/ipv4", table);
99219 if (hdr == NULL)
99220 goto err_reg;
99221
99222@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
99223 return 0;
99224
99225 err_reg:
99226- if (!net_eq(net, &init_net))
99227- kfree(table);
99228+ kfree(table);
99229 err_alloc:
99230 return -ENOMEM;
99231 }
99232diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
99233index 94213c8..8bdb342 100644
99234--- a/net/ipv4/ip_gre.c
99235+++ b/net/ipv4/ip_gre.c
99236@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
99237 module_param(log_ecn_error, bool, 0644);
99238 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
99239
99240-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
99241+static struct rtnl_link_ops ipgre_link_ops;
99242 static int ipgre_tunnel_init(struct net_device *dev);
99243
99244 static int ipgre_net_id __read_mostly;
99245@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
99246 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
99247 };
99248
99249-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
99250+static struct rtnl_link_ops ipgre_link_ops = {
99251 .kind = "gre",
99252 .maxtype = IFLA_GRE_MAX,
99253 .policy = ipgre_policy,
99254@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
99255 .fill_info = ipgre_fill_info,
99256 };
99257
99258-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
99259+static struct rtnl_link_ops ipgre_tap_ops = {
99260 .kind = "gretap",
99261 .maxtype = IFLA_GRE_MAX,
99262 .policy = ipgre_policy,
99263diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
99264index 580dd96..9fcef7e 100644
99265--- a/net/ipv4/ip_sockglue.c
99266+++ b/net/ipv4/ip_sockglue.c
99267@@ -1171,7 +1171,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
99268 len = min_t(unsigned int, len, opt->optlen);
99269 if (put_user(len, optlen))
99270 return -EFAULT;
99271- if (copy_to_user(optval, opt->__data, len))
99272+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
99273+ copy_to_user(optval, opt->__data, len))
99274 return -EFAULT;
99275 return 0;
99276 }
99277@@ -1302,7 +1303,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
99278 if (sk->sk_type != SOCK_STREAM)
99279 return -ENOPROTOOPT;
99280
99281- msg.msg_control = optval;
99282+ msg.msg_control = (void __force_kernel *)optval;
99283 msg.msg_controllen = len;
99284 msg.msg_flags = flags;
99285
99286diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
99287index e4a8f76..dd8ad72 100644
99288--- a/net/ipv4/ip_vti.c
99289+++ b/net/ipv4/ip_vti.c
99290@@ -44,7 +44,7 @@
99291 #include <net/net_namespace.h>
99292 #include <net/netns/generic.h>
99293
99294-static struct rtnl_link_ops vti_link_ops __read_mostly;
99295+static struct rtnl_link_ops vti_link_ops;
99296
99297 static int vti_net_id __read_mostly;
99298 static int vti_tunnel_init(struct net_device *dev);
99299@@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
99300 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
99301 };
99302
99303-static struct rtnl_link_ops vti_link_ops __read_mostly = {
99304+static struct rtnl_link_ops vti_link_ops = {
99305 .kind = "vti",
99306 .maxtype = IFLA_VTI_MAX,
99307 .policy = vti_policy,
99308diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
99309index b3e86ea..18ce98c 100644
99310--- a/net/ipv4/ipconfig.c
99311+++ b/net/ipv4/ipconfig.c
99312@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
99313
99314 mm_segment_t oldfs = get_fs();
99315 set_fs(get_ds());
99316- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
99317+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
99318 set_fs(oldfs);
99319 return res;
99320 }
99321@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
99322
99323 mm_segment_t oldfs = get_fs();
99324 set_fs(get_ds());
99325- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
99326+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
99327 set_fs(oldfs);
99328 return res;
99329 }
99330@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
99331
99332 mm_segment_t oldfs = get_fs();
99333 set_fs(get_ds());
99334- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
99335+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
99336 set_fs(oldfs);
99337 return res;
99338 }
99339diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
99340index 812b183..56cbe9c 100644
99341--- a/net/ipv4/ipip.c
99342+++ b/net/ipv4/ipip.c
99343@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
99344 static int ipip_net_id __read_mostly;
99345
99346 static int ipip_tunnel_init(struct net_device *dev);
99347-static struct rtnl_link_ops ipip_link_ops __read_mostly;
99348+static struct rtnl_link_ops ipip_link_ops;
99349
99350 static int ipip_err(struct sk_buff *skb, u32 info)
99351 {
99352@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
99353 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
99354 };
99355
99356-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
99357+static struct rtnl_link_ops ipip_link_ops = {
99358 .kind = "ipip",
99359 .maxtype = IFLA_IPTUN_MAX,
99360 .policy = ipip_policy,
99361diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
99362index f95b6f9..2ee2097 100644
99363--- a/net/ipv4/netfilter/arp_tables.c
99364+++ b/net/ipv4/netfilter/arp_tables.c
99365@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
99366 #endif
99367
99368 static int get_info(struct net *net, void __user *user,
99369- const int *len, int compat)
99370+ int len, int compat)
99371 {
99372 char name[XT_TABLE_MAXNAMELEN];
99373 struct xt_table *t;
99374 int ret;
99375
99376- if (*len != sizeof(struct arpt_getinfo)) {
99377- duprintf("length %u != %Zu\n", *len,
99378+ if (len != sizeof(struct arpt_getinfo)) {
99379+ duprintf("length %u != %Zu\n", len,
99380 sizeof(struct arpt_getinfo));
99381 return -EINVAL;
99382 }
99383@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
99384 info.size = private->size;
99385 strcpy(info.name, name);
99386
99387- if (copy_to_user(user, &info, *len) != 0)
99388+ if (copy_to_user(user, &info, len) != 0)
99389 ret = -EFAULT;
99390 else
99391 ret = 0;
99392@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
99393
99394 switch (cmd) {
99395 case ARPT_SO_GET_INFO:
99396- ret = get_info(sock_net(sk), user, len, 1);
99397+ ret = get_info(sock_net(sk), user, *len, 1);
99398 break;
99399 case ARPT_SO_GET_ENTRIES:
99400 ret = compat_get_entries(sock_net(sk), user, len);
99401@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
99402
99403 switch (cmd) {
99404 case ARPT_SO_GET_INFO:
99405- ret = get_info(sock_net(sk), user, len, 0);
99406+ ret = get_info(sock_net(sk), user, *len, 0);
99407 break;
99408
99409 case ARPT_SO_GET_ENTRIES:
99410diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
99411index 99e810f..3711b81 100644
99412--- a/net/ipv4/netfilter/ip_tables.c
99413+++ b/net/ipv4/netfilter/ip_tables.c
99414@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
99415 #endif
99416
99417 static int get_info(struct net *net, void __user *user,
99418- const int *len, int compat)
99419+ int len, int compat)
99420 {
99421 char name[XT_TABLE_MAXNAMELEN];
99422 struct xt_table *t;
99423 int ret;
99424
99425- if (*len != sizeof(struct ipt_getinfo)) {
99426- duprintf("length %u != %zu\n", *len,
99427+ if (len != sizeof(struct ipt_getinfo)) {
99428+ duprintf("length %u != %zu\n", len,
99429 sizeof(struct ipt_getinfo));
99430 return -EINVAL;
99431 }
99432@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
99433 info.size = private->size;
99434 strcpy(info.name, name);
99435
99436- if (copy_to_user(user, &info, *len) != 0)
99437+ if (copy_to_user(user, &info, len) != 0)
99438 ret = -EFAULT;
99439 else
99440 ret = 0;
99441@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
99442
99443 switch (cmd) {
99444 case IPT_SO_GET_INFO:
99445- ret = get_info(sock_net(sk), user, len, 1);
99446+ ret = get_info(sock_net(sk), user, *len, 1);
99447 break;
99448 case IPT_SO_GET_ENTRIES:
99449 ret = compat_get_entries(sock_net(sk), user, len);
99450@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
99451
99452 switch (cmd) {
99453 case IPT_SO_GET_INFO:
99454- ret = get_info(sock_net(sk), user, len, 0);
99455+ ret = get_info(sock_net(sk), user, *len, 0);
99456 break;
99457
99458 case IPT_SO_GET_ENTRIES:
99459diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
99460index 2510c02..cfb34fa 100644
99461--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
99462+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
99463@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
99464 spin_lock_init(&cn->lock);
99465
99466 #ifdef CONFIG_PROC_FS
99467- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
99468+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
99469 if (!cn->procdir) {
99470 pr_err("Unable to proc dir entry\n");
99471 return -ENOMEM;
99472diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
99473index e21934b..16f52a6 100644
99474--- a/net/ipv4/ping.c
99475+++ b/net/ipv4/ping.c
99476@@ -59,7 +59,7 @@ struct ping_table {
99477 };
99478
99479 static struct ping_table ping_table;
99480-struct pingv6_ops pingv6_ops;
99481+struct pingv6_ops *pingv6_ops;
99482 EXPORT_SYMBOL_GPL(pingv6_ops);
99483
99484 static u16 ping_port_rover;
99485@@ -259,7 +259,7 @@ int ping_init_sock(struct sock *sk)
99486
99487 inet_get_ping_group_range_net(net, &low, &high);
99488 if (gid_lte(low, group) && gid_lte(group, high))
99489- return 0;
99490+ goto out_release_group;
99491
99492 group_info = get_current_groups();
99493 count = group_info->ngroups;
99494@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
99495 return -ENODEV;
99496 }
99497 }
99498- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
99499+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
99500 scoped);
99501 rcu_read_unlock();
99502
99503@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
99504 }
99505 #if IS_ENABLED(CONFIG_IPV6)
99506 } else if (skb->protocol == htons(ETH_P_IPV6)) {
99507- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
99508+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
99509 #endif
99510 }
99511
99512@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
99513 info, (u8 *)icmph);
99514 #if IS_ENABLED(CONFIG_IPV6)
99515 } else if (family == AF_INET6) {
99516- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
99517+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
99518 info, (u8 *)icmph);
99519 #endif
99520 }
99521@@ -844,6 +844,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99522 {
99523 struct inet_sock *isk = inet_sk(sk);
99524 int family = sk->sk_family;
99525+ struct sockaddr_in *sin;
99526+ struct sockaddr_in6 *sin6;
99527 struct sk_buff *skb;
99528 int copied, err;
99529
99530@@ -853,12 +855,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99531 if (flags & MSG_OOB)
99532 goto out;
99533
99534+ if (addr_len) {
99535+ if (family == AF_INET)
99536+ *addr_len = sizeof(*sin);
99537+ else if (family == AF_INET6 && addr_len)
99538+ *addr_len = sizeof(*sin6);
99539+ }
99540+
99541 if (flags & MSG_ERRQUEUE) {
99542 if (family == AF_INET) {
99543 return ip_recv_error(sk, msg, len, addr_len);
99544 #if IS_ENABLED(CONFIG_IPV6)
99545 } else if (family == AF_INET6) {
99546- return pingv6_ops.ipv6_recv_error(sk, msg, len,
99547+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
99548 addr_len);
99549 #endif
99550 }
99551@@ -890,7 +899,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99552 sin->sin_port = 0 /* skb->h.uh->source */;
99553 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
99554 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
99555- *addr_len = sizeof(*sin);
99556 }
99557
99558 if (isk->cmsg_flags)
99559@@ -912,14 +920,13 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99560 sin6->sin6_scope_id =
99561 ipv6_iface_scope_id(&sin6->sin6_addr,
99562 IP6CB(skb)->iif);
99563- *addr_len = sizeof(*sin6);
99564 }
99565
99566 if (inet6_sk(sk)->rxopt.all)
99567- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
99568+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
99569 if (skb->protocol == htons(ETH_P_IPV6) &&
99570 inet6_sk(sk)->rxopt.all)
99571- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
99572+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
99573 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
99574 ip_cmsg_recv(msg, skb);
99575 #endif
99576@@ -1111,7 +1118,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
99577 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
99578 0, sock_i_ino(sp),
99579 atomic_read(&sp->sk_refcnt), sp,
99580- atomic_read(&sp->sk_drops));
99581+ atomic_read_unchecked(&sp->sk_drops));
99582 }
99583
99584 static int ping_v4_seq_show(struct seq_file *seq, void *v)
99585diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
99586index c04518f..824ebe5 100644
99587--- a/net/ipv4/raw.c
99588+++ b/net/ipv4/raw.c
99589@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
99590 int raw_rcv(struct sock *sk, struct sk_buff *skb)
99591 {
99592 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
99593- atomic_inc(&sk->sk_drops);
99594+ atomic_inc_unchecked(&sk->sk_drops);
99595 kfree_skb(skb);
99596 return NET_RX_DROP;
99597 }
99598@@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99599 if (flags & MSG_OOB)
99600 goto out;
99601
99602+ if (addr_len)
99603+ *addr_len = sizeof(*sin);
99604+
99605 if (flags & MSG_ERRQUEUE) {
99606 err = ip_recv_error(sk, msg, len, addr_len);
99607 goto out;
99608@@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
99609 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
99610 sin->sin_port = 0;
99611 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
99612- *addr_len = sizeof(*sin);
99613 }
99614 if (inet->cmsg_flags)
99615 ip_cmsg_recv(msg, skb);
99616@@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
99617
99618 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
99619 {
99620+ struct icmp_filter filter;
99621+
99622 if (optlen > sizeof(struct icmp_filter))
99623 optlen = sizeof(struct icmp_filter);
99624- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
99625+ if (copy_from_user(&filter, optval, optlen))
99626 return -EFAULT;
99627+ raw_sk(sk)->filter = filter;
99628 return 0;
99629 }
99630
99631 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
99632 {
99633 int len, ret = -EFAULT;
99634+ struct icmp_filter filter;
99635
99636 if (get_user(len, optlen))
99637 goto out;
99638@@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
99639 if (len > sizeof(struct icmp_filter))
99640 len = sizeof(struct icmp_filter);
99641 ret = -EFAULT;
99642- if (put_user(len, optlen) ||
99643- copy_to_user(optval, &raw_sk(sk)->filter, len))
99644+ filter = raw_sk(sk)->filter;
99645+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
99646 goto out;
99647 ret = 0;
99648 out: return ret;
99649@@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
99650 0, 0L, 0,
99651 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
99652 0, sock_i_ino(sp),
99653- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
99654+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
99655 }
99656
99657 static int raw_seq_show(struct seq_file *seq, void *v)
99658diff --git a/net/ipv4/route.c b/net/ipv4/route.c
99659index 1344373..02f339e 100644
99660--- a/net/ipv4/route.c
99661+++ b/net/ipv4/route.c
99662@@ -233,7 +233,7 @@ static const struct seq_operations rt_cache_seq_ops = {
99663
99664 static int rt_cache_seq_open(struct inode *inode, struct file *file)
99665 {
99666- return seq_open(file, &rt_cache_seq_ops);
99667+ return seq_open_restrict(file, &rt_cache_seq_ops);
99668 }
99669
99670 static const struct file_operations rt_cache_seq_fops = {
99671@@ -324,7 +324,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
99672
99673 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
99674 {
99675- return seq_open(file, &rt_cpu_seq_ops);
99676+ return seq_open_restrict(file, &rt_cpu_seq_ops);
99677 }
99678
99679 static const struct file_operations rt_cpu_seq_fops = {
99680@@ -362,7 +362,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
99681
99682 static int rt_acct_proc_open(struct inode *inode, struct file *file)
99683 {
99684- return single_open(file, rt_acct_proc_show, NULL);
99685+ return single_open_restrict(file, rt_acct_proc_show, NULL);
99686 }
99687
99688 static const struct file_operations rt_acct_proc_fops = {
99689@@ -2623,34 +2623,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
99690 .maxlen = sizeof(int),
99691 .mode = 0200,
99692 .proc_handler = ipv4_sysctl_rtcache_flush,
99693+ .extra1 = &init_net,
99694 },
99695 { },
99696 };
99697
99698 static __net_init int sysctl_route_net_init(struct net *net)
99699 {
99700- struct ctl_table *tbl;
99701+ ctl_table_no_const *tbl = NULL;
99702
99703- tbl = ipv4_route_flush_table;
99704 if (!net_eq(net, &init_net)) {
99705- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
99706+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
99707 if (tbl == NULL)
99708 goto err_dup;
99709
99710 /* Don't export sysctls to unprivileged users */
99711 if (net->user_ns != &init_user_ns)
99712 tbl[0].procname = NULL;
99713- }
99714- tbl[0].extra1 = net;
99715+ tbl[0].extra1 = net;
99716+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
99717+ } else
99718+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
99719
99720- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
99721 if (net->ipv4.route_hdr == NULL)
99722 goto err_reg;
99723 return 0;
99724
99725 err_reg:
99726- if (tbl != ipv4_route_flush_table)
99727- kfree(tbl);
99728+ kfree(tbl);
99729 err_dup:
99730 return -ENOMEM;
99731 }
99732@@ -2673,8 +2673,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
99733
99734 static __net_init int rt_genid_init(struct net *net)
99735 {
99736- atomic_set(&net->ipv4.rt_genid, 0);
99737- atomic_set(&net->fnhe_genid, 0);
99738+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
99739+ atomic_set_unchecked(&net->fnhe_genid, 0);
99740 get_random_bytes(&net->ipv4.dev_addr_genid,
99741 sizeof(net->ipv4.dev_addr_genid));
99742 return 0;
99743diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
99744index 44eba05..b36864b 100644
99745--- a/net/ipv4/sysctl_net_ipv4.c
99746+++ b/net/ipv4/sysctl_net_ipv4.c
99747@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
99748 container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
99749 int ret;
99750 int range[2];
99751- struct ctl_table tmp = {
99752+ ctl_table_no_const tmp = {
99753 .data = &range,
99754 .maxlen = sizeof(range),
99755 .mode = table->mode,
99756@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
99757 int ret;
99758 gid_t urange[2];
99759 kgid_t low, high;
99760- struct ctl_table tmp = {
99761+ ctl_table_no_const tmp = {
99762 .data = &urange,
99763 .maxlen = sizeof(urange),
99764 .mode = table->mode,
99765@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
99766 void __user *buffer, size_t *lenp, loff_t *ppos)
99767 {
99768 char val[TCP_CA_NAME_MAX];
99769- struct ctl_table tbl = {
99770+ ctl_table_no_const tbl = {
99771 .data = val,
99772 .maxlen = TCP_CA_NAME_MAX,
99773 };
99774@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
99775 void __user *buffer, size_t *lenp,
99776 loff_t *ppos)
99777 {
99778- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
99779+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
99780 int ret;
99781
99782 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
99783@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
99784 void __user *buffer, size_t *lenp,
99785 loff_t *ppos)
99786 {
99787- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
99788+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
99789 int ret;
99790
99791 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
99792@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
99793 void __user *buffer, size_t *lenp,
99794 loff_t *ppos)
99795 {
99796- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
99797+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
99798 struct tcp_fastopen_context *ctxt;
99799 int ret;
99800 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
99801@@ -438,7 +438,7 @@ static struct ctl_table ipv4_table[] = {
99802 },
99803 {
99804 .procname = "ip_local_reserved_ports",
99805- .data = NULL, /* initialized in sysctl_ipv4_init */
99806+ .data = sysctl_local_reserved_ports,
99807 .maxlen = 65536,
99808 .mode = 0644,
99809 .proc_handler = proc_do_large_bitmap,
99810@@ -843,13 +843,12 @@ static struct ctl_table ipv4_net_table[] = {
99811
99812 static __net_init int ipv4_sysctl_init_net(struct net *net)
99813 {
99814- struct ctl_table *table;
99815+ ctl_table_no_const *table = NULL;
99816
99817- table = ipv4_net_table;
99818 if (!net_eq(net, &init_net)) {
99819 int i;
99820
99821- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
99822+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
99823 if (table == NULL)
99824 goto err_alloc;
99825
99826@@ -872,15 +871,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
99827 net->ipv4.sysctl_local_ports.range[0] = 32768;
99828 net->ipv4.sysctl_local_ports.range[1] = 61000;
99829
99830- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
99831+ if (!net_eq(net, &init_net))
99832+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
99833+ else
99834+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
99835 if (net->ipv4.ipv4_hdr == NULL)
99836 goto err_reg;
99837
99838 return 0;
99839
99840 err_reg:
99841- if (!net_eq(net, &init_net))
99842- kfree(table);
99843+ kfree(table);
99844 err_alloc:
99845 return -ENOMEM;
99846 }
99847@@ -902,16 +903,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
99848 static __init int sysctl_ipv4_init(void)
99849 {
99850 struct ctl_table_header *hdr;
99851- struct ctl_table *i;
99852-
99853- for (i = ipv4_table; i->procname; i++) {
99854- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
99855- i->data = sysctl_local_reserved_ports;
99856- break;
99857- }
99858- }
99859- if (!i->procname)
99860- return -EINVAL;
99861
99862 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
99863 if (hdr == NULL)
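Every sysctl conversion in this hunk follows one shape: the template table keeps its const qualification (the constify plugin makes struct ctl_table read-only) and is registered directly for init_net, while other namespaces get a kmemdup()'d copy typed ctl_table_no_const that may then be patched with per-namespace data pointers. A condensed sketch of that shape, with example_table standing in for the real template:

	static int __net_init example_sysctl_init(struct net *net)
	{
		ctl_table_no_const *table = NULL;
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			/* the per-namespace copy is the only writable instance */
			table = kmemdup(example_table, sizeof(example_table), GFP_KERNEL);
			if (table == NULL)
				return -ENOMEM;
			hdr = register_net_sysctl(net, "net/example", table);
		} else
			hdr = register_net_sysctl(net, "net/example", example_table);

		if (hdr == NULL) {
			kfree(table);	/* NULL on the init_net path; kfree(NULL) is a no-op */
			return -ENOMEM;
		}
		return 0;
	}

This is also why the error paths above lose their net_eq() guard: table is now NULL unless a copy was actually made.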
99864diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
99865index eeaac39..dc29942 100644
99866--- a/net/ipv4/tcp_input.c
99867+++ b/net/ipv4/tcp_input.c
99868@@ -761,7 +761,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
99869 	 * without any lock. We want to make sure the compiler won't store
99870 * intermediate values in this location.
99871 */
99872- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
99873+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
99874 sk->sk_max_pacing_rate);
99875 }
99876
99877@@ -4485,7 +4485,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
99878 * simplifies code)
99879 */
99880 static void
99881-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
99882+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
99883 struct sk_buff *head, struct sk_buff *tail,
99884 u32 start, u32 end)
99885 {
99886@@ -5562,6 +5562,7 @@ discard:
99887 tcp_paws_reject(&tp->rx_opt, 0))
99888 goto discard_and_undo;
99889
99890+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
99891 if (th->syn) {
99892 /* We see SYN without ACK. It is attempt of
99893 * simultaneous connect with crossed SYNs.
99894@@ -5612,6 +5613,7 @@ discard:
99895 goto discard;
99896 #endif
99897 }
99898+#endif
99899 /* "fifth, if neither of the SYN or RST bits is set then
99900 * drop the segment and return."
99901 */
99902@@ -5658,7 +5660,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
99903 goto discard;
99904
99905 if (th->syn) {
99906- if (th->fin)
99907+ if (th->fin || th->urg || th->psh)
99908 goto discard;
99909 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
99910 return 1;
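Two annotations in the tcp_input.c hunk deserve a gloss. ACCESS_ONCE_RW exists because grsecurity redefines the stock ACCESS_ONCE through a const-qualified alias, making it read-only; writes such as the sk_pacing_rate store need the non-const variant. The definitions below mirror the grsecurity tree as we understand it; treat the exact macro bodies as an assumption:

	/* reads go through a const volatile alias ... */
	#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
	/* ... writes need the same trick without the const */
	#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

__intentional_overflow(5,6), likewise, marks parameters 5 and 6 of tcp_collapse() (the start and end sequence numbers) as values whose modular arithmetic is deliberate, so the size-overflow plugin does not instrument them.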
99911diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
99912index 1e4eac7..a66fa4a 100644
99913--- a/net/ipv4/tcp_ipv4.c
99914+++ b/net/ipv4/tcp_ipv4.c
99915@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
99916 EXPORT_SYMBOL(sysctl_tcp_low_latency);
99917
99918
99919+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99920+extern int grsec_enable_blackhole;
99921+#endif
99922+
99923 #ifdef CONFIG_TCP_MD5SIG
99924 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
99925 __be32 daddr, __be32 saddr, const struct tcphdr *th);
99926@@ -1829,6 +1833,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
99927 return 0;
99928
99929 reset:
99930+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99931+ if (!grsec_enable_blackhole)
99932+#endif
99933 tcp_v4_send_reset(rsk, skb);
99934 discard:
99935 kfree_skb(skb);
99936@@ -1974,12 +1981,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
99937 TCP_SKB_CB(skb)->sacked = 0;
99938
99939 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
99940- if (!sk)
99941+ if (!sk) {
99942+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99943+ ret = 1;
99944+#endif
99945 goto no_tcp_socket;
99946-
99947+ }
99948 process:
99949- if (sk->sk_state == TCP_TIME_WAIT)
99950+ if (sk->sk_state == TCP_TIME_WAIT) {
99951+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99952+ ret = 2;
99953+#endif
99954 goto do_time_wait;
99955+ }
99956
99957 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
99958 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
99959@@ -2033,6 +2047,10 @@ csum_error:
99960 bad_packet:
99961 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
99962 } else {
99963+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99964+ if (!grsec_enable_blackhole || (ret == 1 &&
99965+ (skb->dev->flags & IFF_LOOPBACK)))
99966+#endif
99967 tcp_v4_send_reset(NULL, skb);
99968 }
99969
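All the GRKERNSEC_BLACKHOLE hunks in tcp_ipv4.c gate the same thing: any path that would answer an unsolicited segment with a RST stays silent while blackholing is enabled, with one carve-out so that failed lookups on the loopback device still get a reset and local tooling keeps working. Distilled into one function (the wrapper itself is a stand-in; ret follows the hunks' convention):

	static void example_maybe_send_reset(struct sk_buff *skb, int ret)
	{
	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
		/* ret == 1 means no socket matched the segment */
		if (!grsec_enable_blackhole ||
		    (ret == 1 && (skb->dev->flags & IFF_LOOPBACK)))
	#endif
			tcp_v4_send_reset(NULL, skb);
	}

With the config option off, the preprocessor removes the condition entirely and behaviour is unchanged.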
99970diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
99971index 7a436c5..1b05c59 100644
99972--- a/net/ipv4/tcp_minisocks.c
99973+++ b/net/ipv4/tcp_minisocks.c
99974@@ -27,6 +27,10 @@
99975 #include <net/inet_common.h>
99976 #include <net/xfrm.h>
99977
99978+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99979+extern int grsec_enable_blackhole;
99980+#endif
99981+
99982 int sysctl_tcp_syncookies __read_mostly = 1;
99983 EXPORT_SYMBOL(sysctl_tcp_syncookies);
99984
99985@@ -709,7 +713,10 @@ embryonic_reset:
99986 * avoid becoming vulnerable to outside attack aiming at
99987 * resetting legit local connections.
99988 */
99989- req->rsk_ops->send_reset(sk, skb);
99990+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99991+ if (!grsec_enable_blackhole)
99992+#endif
99993+ req->rsk_ops->send_reset(sk, skb);
99994 } else if (fastopen) { /* received a valid RST pkt */
99995 reqsk_fastopen_remove(sk, req, true);
99996 tcp_reset(sk);
99997diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
99998index 1f2d376..01d18c4 100644
99999--- a/net/ipv4/tcp_probe.c
100000+++ b/net/ipv4/tcp_probe.c
100001@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
100002 if (cnt + width >= len)
100003 break;
100004
100005- if (copy_to_user(buf + cnt, tbuf, width))
100006+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
100007 return -EFAULT;
100008 cnt += width;
100009 }
100010diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
100011index 64f0354..a81b39d 100644
100012--- a/net/ipv4/tcp_timer.c
100013+++ b/net/ipv4/tcp_timer.c
100014@@ -22,6 +22,10 @@
100015 #include <linux/gfp.h>
100016 #include <net/tcp.h>
100017
100018+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100019+extern int grsec_lastack_retries;
100020+#endif
100021+
100022 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
100023 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
100024 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
100025@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk)
100026 }
100027 }
100028
100029+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100030+ if ((sk->sk_state == TCP_LAST_ACK) &&
100031+ (grsec_lastack_retries > 0) &&
100032+ (grsec_lastack_retries < retry_until))
100033+ retry_until = grsec_lastack_retries;
100034+#endif
100035+
100036 if (retransmits_timed_out(sk, retry_until,
100037 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
100038 /* Has it gone just too far? */
100039diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
100040index 77bd16f..5f7174a 100644
100041--- a/net/ipv4/udp.c
100042+++ b/net/ipv4/udp.c
100043@@ -87,6 +87,7 @@
100044 #include <linux/types.h>
100045 #include <linux/fcntl.h>
100046 #include <linux/module.h>
100047+#include <linux/security.h>
100048 #include <linux/socket.h>
100049 #include <linux/sockios.h>
100050 #include <linux/igmp.h>
100051@@ -113,6 +114,10 @@
100052 #include <net/busy_poll.h>
100053 #include "udp_impl.h"
100054
100055+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100056+extern int grsec_enable_blackhole;
100057+#endif
100058+
100059 struct udp_table udp_table __read_mostly;
100060 EXPORT_SYMBOL(udp_table);
100061
100062@@ -615,6 +620,9 @@ found:
100063 return s;
100064 }
100065
100066+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
100067+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
100068+
100069 /*
100070 * This routine is called by the ICMP module when it gets some
100071 * sort of error condition. If err < 0 then the socket should
100072@@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100073 dport = usin->sin_port;
100074 if (dport == 0)
100075 return -EINVAL;
100076+
100077+ err = gr_search_udp_sendmsg(sk, usin);
100078+ if (err)
100079+ return err;
100080 } else {
100081 if (sk->sk_state != TCP_ESTABLISHED)
100082 return -EDESTADDRREQ;
100083+
100084+ err = gr_search_udp_sendmsg(sk, NULL);
100085+ if (err)
100086+ return err;
100087+
100088 daddr = inet->inet_daddr;
100089 dport = inet->inet_dport;
100090 /* Open fast path for connected socket.
100091@@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk)
100092 IS_UDPLITE(sk));
100093 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
100094 IS_UDPLITE(sk));
100095- atomic_inc(&sk->sk_drops);
100096+ atomic_inc_unchecked(&sk->sk_drops);
100097 __skb_unlink(skb, rcvq);
100098 __skb_queue_tail(&list_kill, skb);
100099 }
100100@@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100101 int is_udplite = IS_UDPLITE(sk);
100102 bool slow;
100103
100104+	/*
100105+	 * Report the address length up front so msg_namelen is never left uninitialized
100106+	 */
100107+ if (addr_len)
100108+ *addr_len = sizeof(*sin);
100109+
100110 if (flags & MSG_ERRQUEUE)
100111 return ip_recv_error(sk, msg, len, addr_len);
100112
100113@@ -1243,6 +1266,10 @@ try_again:
100114 if (!skb)
100115 goto out;
100116
100117+ err = gr_search_udp_recvmsg(sk, skb);
100118+ if (err)
100119+ goto out_free;
100120+
100121 ulen = skb->len - sizeof(struct udphdr);
100122 copied = len;
100123 if (copied > ulen)
100124@@ -1276,7 +1303,7 @@ try_again:
100125 if (unlikely(err)) {
100126 trace_kfree_skb(skb, udp_recvmsg);
100127 if (!peeked) {
100128- atomic_inc(&sk->sk_drops);
100129+ atomic_inc_unchecked(&sk->sk_drops);
100130 UDP_INC_STATS_USER(sock_net(sk),
100131 UDP_MIB_INERRORS, is_udplite);
100132 }
100133@@ -1295,7 +1322,6 @@ try_again:
100134 sin->sin_port = udp_hdr(skb)->source;
100135 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
100136 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
100137- *addr_len = sizeof(*sin);
100138 }
100139 if (inet->cmsg_flags)
100140 ip_cmsg_recv(msg, skb);
100141@@ -1566,7 +1592,7 @@ csum_error:
100142 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
100143 drop:
100144 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
100145- atomic_inc(&sk->sk_drops);
100146+ atomic_inc_unchecked(&sk->sk_drops);
100147 kfree_skb(skb);
100148 return -1;
100149 }
100150@@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
100151 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
100152
100153 if (!skb1) {
100154- atomic_inc(&sk->sk_drops);
100155+ atomic_inc_unchecked(&sk->sk_drops);
100156 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
100157 IS_UDPLITE(sk));
100158 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
100159@@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
100160 goto csum_error;
100161
100162 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
100163+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100164+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
100165+#endif
100166 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
100167
100168 /*
100169@@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
100170 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
100171 0, sock_i_ino(sp),
100172 atomic_read(&sp->sk_refcnt), sp,
100173- atomic_read(&sp->sk_drops));
100174+ atomic_read_unchecked(&sp->sk_drops));
100175 }
100176
100177 int udp4_seq_show(struct seq_file *seq, void *v)
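The udp_recvmsg() change is the notable one in this file: *addr_len is now written once, at the top, and the assignment in the msg_name branch is deleted. With the old placement, a caller who passed no msg_name returned with *addr_len untouched, so stale kernel stack could reach userspace through msg_namelen (the same class of leak fixed upstream across several protocols around this time). The shape of the fix, as a generic sketch with a hypothetical handler name:

	static int example_recvmsg(struct sock *sk, struct msghdr *msg,
				   size_t len, int *addr_len)
	{
		/* write the reported length before any early return, not
		 * only on the branch that fills in a source address */
		if (addr_len)
			*addr_len = sizeof(struct sockaddr_in);

		/* ... receive path; early exits can no longer leak ... */
		return 0;
	}

The raw, l2tp and IPv6 UDP hunks later in this patch apply the identical movement.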
100178diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
100179index e1a6393..f634ce5 100644
100180--- a/net/ipv4/xfrm4_policy.c
100181+++ b/net/ipv4/xfrm4_policy.c
100182@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
100183 fl4->flowi4_tos = iph->tos;
100184 }
100185
100186-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
100187+static int xfrm4_garbage_collect(struct dst_ops *ops)
100188 {
100189 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
100190
100191- xfrm4_policy_afinfo.garbage_collect(net);
100192+ xfrm_garbage_collect_deferred(net);
100193 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
100194 }
100195
100196@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
100197
100198 static int __net_init xfrm4_net_init(struct net *net)
100199 {
100200- struct ctl_table *table;
100201+ ctl_table_no_const *table = NULL;
100202 struct ctl_table_header *hdr;
100203
100204- table = xfrm4_policy_table;
100205 if (!net_eq(net, &init_net)) {
100206- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
100207+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
100208 if (!table)
100209 goto err_alloc;
100210
100211 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
100212- }
100213-
100214- hdr = register_net_sysctl(net, "net/ipv4", table);
100215+ hdr = register_net_sysctl(net, "net/ipv4", table);
100216+ } else
100217+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
100218 if (!hdr)
100219 goto err_reg;
100220
100221@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
100222 return 0;
100223
100224 err_reg:
100225- if (!net_eq(net, &init_net))
100226- kfree(table);
100227+ kfree(table);
100228 err_alloc:
100229 return -ENOMEM;
100230 }
100231diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
100232index 6c7fa08..285086c 100644
100233--- a/net/ipv6/addrconf.c
100234+++ b/net/ipv6/addrconf.c
100235@@ -598,7 +598,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
100236 idx = 0;
100237 head = &net->dev_index_head[h];
100238 rcu_read_lock();
100239- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
100240+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
100241 net->dev_base_seq;
100242 hlist_for_each_entry_rcu(dev, head, index_hlist) {
100243 if (idx < s_idx)
100244@@ -2395,7 +2395,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
100245 p.iph.ihl = 5;
100246 p.iph.protocol = IPPROTO_IPV6;
100247 p.iph.ttl = 64;
100248- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
100249+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
100250
100251 if (ops->ndo_do_ioctl) {
100252 mm_segment_t oldfs = get_fs();
100253@@ -4146,7 +4146,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
100254 s_ip_idx = ip_idx = cb->args[2];
100255
100256 rcu_read_lock();
100257- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
100258+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
100259 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
100260 idx = 0;
100261 head = &net->dev_index_head[h];
100262@@ -4758,7 +4758,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
100263 dst_free(&ifp->rt->dst);
100264 break;
100265 }
100266- atomic_inc(&net->ipv6.dev_addr_genid);
100267+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
100268 rt_genid_bump_ipv6(net);
100269 }
100270
100271@@ -4779,7 +4779,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
100272 int *valp = ctl->data;
100273 int val = *valp;
100274 loff_t pos = *ppos;
100275- struct ctl_table lctl;
100276+ ctl_table_no_const lctl;
100277 int ret;
100278
100279 /*
100280@@ -4864,7 +4864,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
100281 int *valp = ctl->data;
100282 int val = *valp;
100283 loff_t pos = *ppos;
100284- struct ctl_table lctl;
100285+ ctl_table_no_const lctl;
100286 int ret;
100287
100288 /*
100289diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
100290index d935889..2f64330 100644
100291--- a/net/ipv6/af_inet6.c
100292+++ b/net/ipv6/af_inet6.c
100293@@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net)
100294 net->ipv6.sysctl.bindv6only = 0;
100295 net->ipv6.sysctl.icmpv6_time = 1*HZ;
100296 net->ipv6.sysctl.flowlabel_consistency = 1;
100297- atomic_set(&net->ipv6.rt_genid, 0);
100298+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
100299
100300 err = ipv6_init_mibs(net);
100301 if (err)
100302diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
100303index c3bf2d2..1f00573 100644
100304--- a/net/ipv6/datagram.c
100305+++ b/net/ipv6/datagram.c
100306@@ -938,5 +938,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
100307 0,
100308 sock_i_ino(sp),
100309 atomic_read(&sp->sk_refcnt), sp,
100310- atomic_read(&sp->sk_drops));
100311+ atomic_read_unchecked(&sp->sk_drops));
100312 }
100313diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
100314index 7b32652..0bc348b 100644
100315--- a/net/ipv6/icmp.c
100316+++ b/net/ipv6/icmp.c
100317@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
100318
100319 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
100320 {
100321- struct ctl_table *table;
100322+ ctl_table_no_const *table;
100323
100324 table = kmemdup(ipv6_icmp_table_template,
100325 sizeof(ipv6_icmp_table_template),
100326diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
100327index 2465d18..bc5bf7f 100644
100328--- a/net/ipv6/ip6_gre.c
100329+++ b/net/ipv6/ip6_gre.c
100330@@ -71,7 +71,7 @@ struct ip6gre_net {
100331 struct net_device *fb_tunnel_dev;
100332 };
100333
100334-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
100335+static struct rtnl_link_ops ip6gre_link_ops;
100336 static int ip6gre_tunnel_init(struct net_device *dev);
100337 static void ip6gre_tunnel_setup(struct net_device *dev);
100338 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
100339@@ -1291,7 +1291,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
100340 }
100341
100342
100343-static struct inet6_protocol ip6gre_protocol __read_mostly = {
100344+static struct inet6_protocol ip6gre_protocol = {
100345 .handler = ip6gre_rcv,
100346 .err_handler = ip6gre_err,
100347 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
100348@@ -1643,7 +1643,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
100349 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
100350 };
100351
100352-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
100353+static struct rtnl_link_ops ip6gre_link_ops = {
100354 .kind = "ip6gre",
100355 .maxtype = IFLA_GRE_MAX,
100356 .policy = ip6gre_policy,
100357@@ -1657,7 +1657,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
100358 .fill_info = ip6gre_fill_info,
100359 };
100360
100361-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
100362+static struct rtnl_link_ops ip6gre_tap_ops = {
100363 .kind = "ip6gretap",
100364 .maxtype = IFLA_GRE_MAX,
100365 .policy = ip6gre_policy,
100366diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
100367index 0e51f68..1f501e1 100644
100368--- a/net/ipv6/ip6_tunnel.c
100369+++ b/net/ipv6/ip6_tunnel.c
100370@@ -85,7 +85,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
100371
100372 static int ip6_tnl_dev_init(struct net_device *dev);
100373 static void ip6_tnl_dev_setup(struct net_device *dev);
100374-static struct rtnl_link_ops ip6_link_ops __read_mostly;
100375+static struct rtnl_link_ops ip6_link_ops;
100376
100377 static int ip6_tnl_net_id __read_mostly;
100378 struct ip6_tnl_net {
100379@@ -1714,7 +1714,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
100380 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
100381 };
100382
100383-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
100384+static struct rtnl_link_ops ip6_link_ops = {
100385 .kind = "ip6tnl",
100386 .maxtype = IFLA_IPTUN_MAX,
100387 .policy = ip6_tnl_policy,
100388diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
100389index 2d19272..3a46322 100644
100390--- a/net/ipv6/ip6_vti.c
100391+++ b/net/ipv6/ip6_vti.c
100392@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
100393
100394 static int vti6_dev_init(struct net_device *dev);
100395 static void vti6_dev_setup(struct net_device *dev);
100396-static struct rtnl_link_ops vti6_link_ops __read_mostly;
100397+static struct rtnl_link_ops vti6_link_ops;
100398
100399 static int vti6_net_id __read_mostly;
100400 struct vti6_net {
100401@@ -901,7 +901,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
100402 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
100403 };
100404
100405-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
100406+static struct rtnl_link_ops vti6_link_ops = {
100407 .kind = "vti6",
100408 .maxtype = IFLA_VTI_MAX,
100409 .policy = vti6_policy,
100410diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
100411index 0a00f44..bec42b2 100644
100412--- a/net/ipv6/ipv6_sockglue.c
100413+++ b/net/ipv6/ipv6_sockglue.c
100414@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
100415 if (sk->sk_type != SOCK_STREAM)
100416 return -ENOPROTOOPT;
100417
100418- msg.msg_control = optval;
100419+ msg.msg_control = (void __force_kernel *)optval;
100420 msg.msg_controllen = len;
100421 msg.msg_flags = flags;
100422
100423diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
100424index e080fbb..412b3cf 100644
100425--- a/net/ipv6/netfilter/ip6_tables.c
100426+++ b/net/ipv6/netfilter/ip6_tables.c
100427@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
100428 #endif
100429
100430 static int get_info(struct net *net, void __user *user,
100431- const int *len, int compat)
100432+ int len, int compat)
100433 {
100434 char name[XT_TABLE_MAXNAMELEN];
100435 struct xt_table *t;
100436 int ret;
100437
100438- if (*len != sizeof(struct ip6t_getinfo)) {
100439- duprintf("length %u != %zu\n", *len,
100440+ if (len != sizeof(struct ip6t_getinfo)) {
100441+ duprintf("length %u != %zu\n", len,
100442 sizeof(struct ip6t_getinfo));
100443 return -EINVAL;
100444 }
100445@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
100446 info.size = private->size;
100447 strcpy(info.name, name);
100448
100449- if (copy_to_user(user, &info, *len) != 0)
100450+ if (copy_to_user(user, &info, len) != 0)
100451 ret = -EFAULT;
100452 else
100453 ret = 0;
100454@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
100455
100456 switch (cmd) {
100457 case IP6T_SO_GET_INFO:
100458- ret = get_info(sock_net(sk), user, len, 1);
100459+ ret = get_info(sock_net(sk), user, *len, 1);
100460 break;
100461 case IP6T_SO_GET_ENTRIES:
100462 ret = compat_get_entries(sock_net(sk), user, len);
100463@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
100464
100465 switch (cmd) {
100466 case IP6T_SO_GET_INFO:
100467- ret = get_info(sock_net(sk), user, len, 0);
100468+ ret = get_info(sock_net(sk), user, *len, 0);
100469 break;
100470
100471 case IP6T_SO_GET_ENTRIES:
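get_info() now takes the length by value, and both the native and compat ioctl paths dereference *len exactly once at the call site. The pointer version read *len separately for the size check and again for the copy_to_user(), a split that the patch removes, presumably so the validated length and the copied length can never diverge. Minimal sketch of the resulting shape (hypothetical function name):

	static int example_get_info(void __user *user, int len)
	{
		struct ip6t_getinfo info = { };

		if (len != sizeof(info))	/* validated once ... */
			return -EINVAL;
		/* ... fill info from the table under lock ... */
		return copy_to_user(user, &info, len) ? -EFAULT : 0; /* ... used once */
	}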
100472diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
100473index 767ab8d..c5ec70a 100644
100474--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
100475+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
100476@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
100477
100478 static int nf_ct_frag6_sysctl_register(struct net *net)
100479 {
100480- struct ctl_table *table;
100481+ ctl_table_no_const *table = NULL;
100482 struct ctl_table_header *hdr;
100483
100484- table = nf_ct_frag6_sysctl_table;
100485 if (!net_eq(net, &init_net)) {
100486- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
100487+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
100488 GFP_KERNEL);
100489 if (table == NULL)
100490 goto err_alloc;
100491@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
100492 table[0].data = &net->nf_frag.frags.timeout;
100493 table[1].data = &net->nf_frag.frags.low_thresh;
100494 table[2].data = &net->nf_frag.frags.high_thresh;
100495- }
100496-
100497- hdr = register_net_sysctl(net, "net/netfilter", table);
100498+ hdr = register_net_sysctl(net, "net/netfilter", table);
100499+ } else
100500+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
100501 if (hdr == NULL)
100502 goto err_reg;
100503
100504@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
100505 return 0;
100506
100507 err_reg:
100508- if (!net_eq(net, &init_net))
100509- kfree(table);
100510+ kfree(table);
100511 err_alloc:
100512 return -ENOMEM;
100513 }
100514diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
100515index 827f795..bdff9eb 100644
100516--- a/net/ipv6/output_core.c
100517+++ b/net/ipv6/output_core.c
100518@@ -9,8 +9,8 @@
100519
100520 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
100521 {
100522- static atomic_t ipv6_fragmentation_id;
100523- int old, new;
100524+ static atomic_unchecked_t ipv6_fragmentation_id;
100525+ int id;
100526
100527 #if IS_ENABLED(CONFIG_IPV6)
100528 if (rt && !(rt->dst.flags & DST_NOPEER)) {
100529@@ -26,13 +26,8 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
100530 }
100531 }
100532 #endif
100533- do {
100534- old = atomic_read(&ipv6_fragmentation_id);
100535- new = old + 1;
100536- if (!new)
100537- new = 1;
100538- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
100539- fhdr->identification = htonl(new);
100540+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
100541+ fhdr->identification = htonl(id);
100542 }
100543 EXPORT_SYMBOL(ipv6_select_ident);
100544
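Note the semantic trade in the output_core.c hunk: the removed cmpxchg loop guaranteed the IPv6 fragmentation id never landed on zero, while the replacement takes whatever the counter yields, zero included once per 2^32 fragments, in exchange for a single atomic operation and no retry loop. The surviving logic is just:

	static atomic_unchecked_t ipv6_fragmentation_id;

	static __be32 example_select_ident(void)
	{
		/* id 0 is now possible once per wrap; the old loop skipped it */
		return htonl(atomic_inc_return_unchecked(&ipv6_fragmentation_id));
	}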
100545diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
100546index bda7429..469b26b 100644
100547--- a/net/ipv6/ping.c
100548+++ b/net/ipv6/ping.c
100549@@ -246,6 +246,24 @@ static struct pernet_operations ping_v6_net_ops = {
100550 };
100551 #endif
100552
100553+static struct pingv6_ops real_pingv6_ops = {
100554+ .ipv6_recv_error = ipv6_recv_error,
100555+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
100556+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
100557+ .icmpv6_err_convert = icmpv6_err_convert,
100558+ .ipv6_icmp_error = ipv6_icmp_error,
100559+ .ipv6_chk_addr = ipv6_chk_addr,
100560+};
100561+
100562+static struct pingv6_ops dummy_pingv6_ops = {
100563+ .ipv6_recv_error = dummy_ipv6_recv_error,
100564+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
100565+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
100566+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
100567+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
100568+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
100569+};
100570+
100571 int __init pingv6_init(void)
100572 {
100573 #ifdef CONFIG_PROC_FS
100574@@ -253,13 +271,7 @@ int __init pingv6_init(void)
100575 if (ret)
100576 return ret;
100577 #endif
100578- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
100579- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
100580- pingv6_ops.ip6_datagram_recv_specific_ctl =
100581- ip6_datagram_recv_specific_ctl;
100582- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
100583- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
100584- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
100585+ pingv6_ops = &real_pingv6_ops;
100586 return inet6_register_protosw(&pingv6_protosw);
100587 }
100588
100589@@ -268,14 +280,9 @@ int __init pingv6_init(void)
100590 */
100591 void pingv6_exit(void)
100592 {
100593- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
100594- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
100595- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
100596- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
100597- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
100598- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
100599 #ifdef CONFIG_PROC_FS
100600 unregister_pernet_subsys(&ping_v6_net_ops);
100601 #endif
100602+ pingv6_ops = &dummy_pingv6_ops;
100603 inet6_unregister_protosw(&pingv6_protosw);
100604 }
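pingv6_init()/pingv6_exit() stop assigning six function pointers one by one and instead flip a single pingv6_ops pointer between two fully populated structs. Under the constify plugin those structs can end up in read-only memory, shrinking the writable attack surface from six function pointers to one data pointer. The idiom in miniature (all names here are illustrative):

	#include <linux/errno.h>

	struct example_ops {
		int (*handle)(int arg);
	};

	static int real_handle(int arg)  { return arg; }
	static int dummy_handle(int arg) { return -EAFNOSUPPORT; }

	static struct example_ops real_ops  = { .handle = real_handle };
	static struct example_ops dummy_ops = { .handle = dummy_handle };

	/* one writable pointer instead of N writable members */
	static struct example_ops *example_ops = &dummy_ops;

	static void example_init(void) { example_ops = &real_ops; }
	static void example_exit(void) { example_ops = &dummy_ops; }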
100605diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
100606index 091d066..139d410 100644
100607--- a/net/ipv6/proc.c
100608+++ b/net/ipv6/proc.c
100609@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
100610 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
100611 goto proc_snmp6_fail;
100612
100613- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
100614+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
100615 if (!net->mib.proc_net_devsnmp6)
100616 goto proc_dev_snmp6_fail;
100617 return 0;
100618diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
100619index 1f29996..46fe0c7 100644
100620--- a/net/ipv6/raw.c
100621+++ b/net/ipv6/raw.c
100622@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
100623 {
100624 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
100625 skb_checksum_complete(skb)) {
100626- atomic_inc(&sk->sk_drops);
100627+ atomic_inc_unchecked(&sk->sk_drops);
100628 kfree_skb(skb);
100629 return NET_RX_DROP;
100630 }
100631@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
100632 struct raw6_sock *rp = raw6_sk(sk);
100633
100634 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
100635- atomic_inc(&sk->sk_drops);
100636+ atomic_inc_unchecked(&sk->sk_drops);
100637 kfree_skb(skb);
100638 return NET_RX_DROP;
100639 }
100640@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
100641
100642 if (inet->hdrincl) {
100643 if (skb_checksum_complete(skb)) {
100644- atomic_inc(&sk->sk_drops);
100645+ atomic_inc_unchecked(&sk->sk_drops);
100646 kfree_skb(skb);
100647 return NET_RX_DROP;
100648 }
100649@@ -469,6 +469,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
100650 if (flags & MSG_OOB)
100651 return -EOPNOTSUPP;
100652
100653+ if (addr_len)
100654+	*addr_len = sizeof(*sin6);
100655+
100656 if (flags & MSG_ERRQUEUE)
100657 return ipv6_recv_error(sk, msg, len, addr_len);
100658
100659@@ -507,7 +510,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
100660 sin6->sin6_flowinfo = 0;
100661 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
100662 IP6CB(skb)->iif);
100663- *addr_len = sizeof(*sin6);
100664 }
100665
100666 sock_recv_ts_and_drops(msg, sk, skb);
100667@@ -610,7 +612,7 @@ out:
100668 return err;
100669 }
100670
100671-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
100672+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
100673 struct flowi6 *fl6, struct dst_entry **dstp,
100674 unsigned int flags)
100675 {
100676@@ -922,12 +924,15 @@ do_confirm:
100677 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
100678 char __user *optval, int optlen)
100679 {
100680+ struct icmp6_filter filter;
100681+
100682 switch (optname) {
100683 case ICMPV6_FILTER:
100684 if (optlen > sizeof(struct icmp6_filter))
100685 optlen = sizeof(struct icmp6_filter);
100686- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
100687+ if (copy_from_user(&filter, optval, optlen))
100688 return -EFAULT;
100689+ raw6_sk(sk)->filter = filter;
100690 return 0;
100691 default:
100692 return -ENOPROTOOPT;
100693@@ -940,6 +945,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
100694 char __user *optval, int __user *optlen)
100695 {
100696 int len;
100697+ struct icmp6_filter filter;
100698
100699 switch (optname) {
100700 case ICMPV6_FILTER:
100701@@ -951,7 +957,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
100702 len = sizeof(struct icmp6_filter);
100703 if (put_user(len, optlen))
100704 return -EFAULT;
100705- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
100706+ filter = raw6_sk(sk)->filter;
100707+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
100708 return -EFAULT;
100709 return 0;
100710 default:
100711diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
100712index cc85a9b..526a133 100644
100713--- a/net/ipv6/reassembly.c
100714+++ b/net/ipv6/reassembly.c
100715@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
100716
100717 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
100718 {
100719- struct ctl_table *table;
100720+ ctl_table_no_const *table = NULL;
100721 struct ctl_table_header *hdr;
100722
100723- table = ip6_frags_ns_ctl_table;
100724 if (!net_eq(net, &init_net)) {
100725- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
100726+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
100727 if (table == NULL)
100728 goto err_alloc;
100729
100730@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
100731 /* Don't export sysctls to unprivileged users */
100732 if (net->user_ns != &init_user_ns)
100733 table[0].procname = NULL;
100734- }
100735+ hdr = register_net_sysctl(net, "net/ipv6", table);
100736+ } else
100737+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
100738
100739- hdr = register_net_sysctl(net, "net/ipv6", table);
100740 if (hdr == NULL)
100741 goto err_reg;
100742
100743@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
100744 return 0;
100745
100746 err_reg:
100747- if (!net_eq(net, &init_net))
100748- kfree(table);
100749+ kfree(table);
100750 err_alloc:
100751 return -ENOMEM;
100752 }
100753diff --git a/net/ipv6/route.c b/net/ipv6/route.c
100754index 7cc1102..7785931 100644
100755--- a/net/ipv6/route.c
100756+++ b/net/ipv6/route.c
100757@@ -2973,7 +2973,7 @@ struct ctl_table ipv6_route_table_template[] = {
100758
100759 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
100760 {
100761- struct ctl_table *table;
100762+ ctl_table_no_const *table;
100763
100764 table = kmemdup(ipv6_route_table_template,
100765 sizeof(ipv6_route_table_template),
100766diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
100767index b4d74c8..b4f3fbe 100644
100768--- a/net/ipv6/sit.c
100769+++ b/net/ipv6/sit.c
100770@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
100771 static void ipip6_dev_free(struct net_device *dev);
100772 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
100773 __be32 *v4dst);
100774-static struct rtnl_link_ops sit_link_ops __read_mostly;
100775+static struct rtnl_link_ops sit_link_ops;
100776
100777 static int sit_net_id __read_mostly;
100778 struct sit_net {
100779@@ -1683,7 +1683,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
100780 unregister_netdevice_queue(dev, head);
100781 }
100782
100783-static struct rtnl_link_ops sit_link_ops __read_mostly = {
100784+static struct rtnl_link_ops sit_link_ops = {
100785 .kind = "sit",
100786 .maxtype = IFLA_IPTUN_MAX,
100787 .policy = ipip6_policy,
100788diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
100789index 7f405a1..eabef92 100644
100790--- a/net/ipv6/sysctl_net_ipv6.c
100791+++ b/net/ipv6/sysctl_net_ipv6.c
100792@@ -54,7 +54,7 @@ static struct ctl_table ipv6_rotable[] = {
100793
100794 static int __net_init ipv6_sysctl_net_init(struct net *net)
100795 {
100796- struct ctl_table *ipv6_table;
100797+ ctl_table_no_const *ipv6_table;
100798 struct ctl_table *ipv6_route_table;
100799 struct ctl_table *ipv6_icmp_table;
100800 int err;
100801diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
100802index 889079b..a04512c 100644
100803--- a/net/ipv6/tcp_ipv6.c
100804+++ b/net/ipv6/tcp_ipv6.c
100805@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
100806 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
100807 }
100808
100809+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100810+extern int grsec_enable_blackhole;
100811+#endif
100812+
100813 static void tcp_v6_hash(struct sock *sk)
100814 {
100815 if (sk->sk_state != TCP_CLOSE) {
100816@@ -1412,6 +1416,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
100817 return 0;
100818
100819 reset:
100820+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100821+ if (!grsec_enable_blackhole)
100822+#endif
100823 tcp_v6_send_reset(sk, skb);
100824 discard:
100825 if (opt_skb)
100826@@ -1496,12 +1503,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
100827 TCP_SKB_CB(skb)->sacked = 0;
100828
100829 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100830- if (!sk)
100831+ if (!sk) {
100832+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100833+ ret = 1;
100834+#endif
100835 goto no_tcp_socket;
100836+ }
100837
100838 process:
100839- if (sk->sk_state == TCP_TIME_WAIT)
100840+ if (sk->sk_state == TCP_TIME_WAIT) {
100841+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100842+ ret = 2;
100843+#endif
100844 goto do_time_wait;
100845+ }
100846
100847 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
100848 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
100849@@ -1553,6 +1568,10 @@ csum_error:
100850 bad_packet:
100851 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100852 } else {
100853+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100854+ if (!grsec_enable_blackhole || (ret == 1 &&
100855+ (skb->dev->flags & IFF_LOOPBACK)))
100856+#endif
100857 tcp_v6_send_reset(NULL, skb);
100858 }
100859
100860diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
100861index 1e586d9..384a9c9 100644
100862--- a/net/ipv6/udp.c
100863+++ b/net/ipv6/udp.c
100864@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
100865 udp_ipv6_hash_secret + net_hash_mix(net));
100866 }
100867
100868+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100869+extern int grsec_enable_blackhole;
100870+#endif
100871+
100872 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
100873 {
100874 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
100875@@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
100876 int is_udp4;
100877 bool slow;
100878
100879+ if (addr_len)
100880+ *addr_len = sizeof(struct sockaddr_in6);
100881+
100882 if (flags & MSG_ERRQUEUE)
100883 return ipv6_recv_error(sk, msg, len, addr_len);
100884
100885@@ -435,7 +442,7 @@ try_again:
100886 if (unlikely(err)) {
100887 trace_kfree_skb(skb, udpv6_recvmsg);
100888 if (!peeked) {
100889- atomic_inc(&sk->sk_drops);
100890+ atomic_inc_unchecked(&sk->sk_drops);
100891 if (is_udp4)
100892 UDP_INC_STATS_USER(sock_net(sk),
100893 UDP_MIB_INERRORS,
100894@@ -475,7 +482,7 @@ try_again:
100895 ipv6_iface_scope_id(&sin6->sin6_addr,
100896 IP6CB(skb)->iif);
100897 }
100898- *addr_len = sizeof(*sin6);
100899+
100900 }
100901
100902 if (np->rxopt.all)
100903@@ -690,7 +697,7 @@ csum_error:
100904 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
100905 drop:
100906 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
100907- atomic_inc(&sk->sk_drops);
100908+ atomic_inc_unchecked(&sk->sk_drops);
100909 kfree_skb(skb);
100910 return -1;
100911 }
100912@@ -747,7 +754,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
100913 if (likely(skb1 == NULL))
100914 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
100915 if (!skb1) {
100916- atomic_inc(&sk->sk_drops);
100917+ atomic_inc_unchecked(&sk->sk_drops);
100918 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
100919 IS_UDPLITE(sk));
100920 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
100921@@ -886,6 +893,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
100922 goto csum_error;
100923
100924 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
100925+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100926+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
100927+#endif
100928 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
100929
100930 kfree_skb(skb);
100931diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
100932index 5f8e128..865d38e 100644
100933--- a/net/ipv6/xfrm6_policy.c
100934+++ b/net/ipv6/xfrm6_policy.c
100935@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
100936 }
100937 }
100938
100939-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
100940+static int xfrm6_garbage_collect(struct dst_ops *ops)
100941 {
100942 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
100943
100944- xfrm6_policy_afinfo.garbage_collect(net);
100945+ xfrm_garbage_collect_deferred(net);
100946 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
100947 }
100948
100949@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
100950
100951 static int __net_init xfrm6_net_init(struct net *net)
100952 {
100953- struct ctl_table *table;
100954+ ctl_table_no_const *table = NULL;
100955 struct ctl_table_header *hdr;
100956
100957- table = xfrm6_policy_table;
100958 if (!net_eq(net, &init_net)) {
100959- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
100960+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
100961 if (!table)
100962 goto err_alloc;
100963
100964 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
100965- }
100966+ hdr = register_net_sysctl(net, "net/ipv6", table);
100967+ } else
100968+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
100969
100970- hdr = register_net_sysctl(net, "net/ipv6", table);
100971 if (!hdr)
100972 goto err_reg;
100973
100974@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
100975 return 0;
100976
100977 err_reg:
100978- if (!net_eq(net, &init_net))
100979- kfree(table);
100980+ kfree(table);
100981 err_alloc:
100982 return -ENOMEM;
100983 }
100984diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
100985index e15c16a..7cf07aa 100644
100986--- a/net/ipx/ipx_proc.c
100987+++ b/net/ipx/ipx_proc.c
100988@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
100989 struct proc_dir_entry *p;
100990 int rc = -ENOMEM;
100991
100992- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
100993+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
100994
100995 if (!ipx_proc_dir)
100996 goto out;
100997diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
100998index 2ba8b97..6d33010 100644
100999--- a/net/irda/ircomm/ircomm_tty.c
101000+++ b/net/irda/ircomm/ircomm_tty.c
101001@@ -317,11 +317,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101002 add_wait_queue(&port->open_wait, &wait);
101003
101004 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
101005- __FILE__, __LINE__, tty->driver->name, port->count);
101006+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
101007
101008 spin_lock_irqsave(&port->lock, flags);
101009 if (!tty_hung_up_p(filp))
101010- port->count--;
101011+ atomic_dec(&port->count);
101012 port->blocked_open++;
101013 spin_unlock_irqrestore(&port->lock, flags);
101014
101015@@ -356,7 +356,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101016 }
101017
101018 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
101019- __FILE__, __LINE__, tty->driver->name, port->count);
101020+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
101021
101022 schedule();
101023 }
101024@@ -366,12 +366,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
101025
101026 spin_lock_irqsave(&port->lock, flags);
101027 if (!tty_hung_up_p(filp))
101028- port->count++;
101029+ atomic_inc(&port->count);
101030 port->blocked_open--;
101031 spin_unlock_irqrestore(&port->lock, flags);
101032
101033 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
101034- __FILE__, __LINE__, tty->driver->name, port->count);
101035+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
101036
101037 if (!retval)
101038 port->flags |= ASYNC_NORMAL_ACTIVE;
101039@@ -445,12 +445,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
101040
101041 /* ++ is not atomic, so this should be protected - Jean II */
101042 spin_lock_irqsave(&self->port.lock, flags);
101043- self->port.count++;
101044+ atomic_inc(&self->port.count);
101045 spin_unlock_irqrestore(&self->port.lock, flags);
101046 tty_port_tty_set(&self->port, tty);
101047
101048 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
101049- self->line, self->port.count);
101050+ self->line, atomic_read(&self->port.count));
101051
101052 /* Not really used by us, but lets do it anyway */
101053 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
101054@@ -987,7 +987,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
101055 tty_kref_put(port->tty);
101056 }
101057 port->tty = NULL;
101058- port->count = 0;
101059+ atomic_set(&port->count, 0);
101060 spin_unlock_irqrestore(&port->lock, flags);
101061
101062 wake_up_interruptible(&port->open_wait);
101063@@ -1344,7 +1344,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
101064 seq_putc(m, '\n');
101065
101066 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
101067- seq_printf(m, "Open count: %d\n", self->port.count);
101068+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
101069 seq_printf(m, "Max data size: %d\n", self->max_data_size);
101070 seq_printf(m, "Max header size: %d\n", self->max_header_size);
101071
101072diff --git a/net/irda/irproc.c b/net/irda/irproc.c
101073index b9ac598..f88cc56 100644
101074--- a/net/irda/irproc.c
101075+++ b/net/irda/irproc.c
101076@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
101077 {
101078 int i;
101079
101080- proc_irda = proc_mkdir("irda", init_net.proc_net);
101081+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
101082 if (proc_irda == NULL)
101083 return;
101084
101085diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
101086index c4b7218..c7e9f14 100644
101087--- a/net/iucv/af_iucv.c
101088+++ b/net/iucv/af_iucv.c
101089@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
101090
101091 write_lock_bh(&iucv_sk_list.lock);
101092
101093- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
101094+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101095 while (__iucv_get_sock_by_name(name)) {
101096 sprintf(name, "%08x",
101097- atomic_inc_return(&iucv_sk_list.autobind_name));
101098+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
101099 }
101100
101101 write_unlock_bh(&iucv_sk_list.lock);
101102@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
101103 spin_lock_irqsave(&list->lock, flags);
101104
101105 while (list_skb != (struct sk_buff *)list) {
101106- if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
101107+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
101108 this = list_skb;
101109 break;
101110 }
101111diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
101112index cd5b8ec..f205e6b 100644
101113--- a/net/iucv/iucv.c
101114+++ b/net/iucv/iucv.c
101115@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
101116 return NOTIFY_OK;
101117 }
101118
101119-static struct notifier_block __refdata iucv_cpu_notifier = {
101120+static struct notifier_block iucv_cpu_notifier = {
101121 .notifier_call = iucv_cpu_notify,
101122 };
101123
101124diff --git a/net/key/af_key.c b/net/key/af_key.c
101125index 7932697..a13d158 100644
101126--- a/net/key/af_key.c
101127+++ b/net/key/af_key.c
101128@@ -3052,10 +3052,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
101129 static u32 get_acqseq(void)
101130 {
101131 u32 res;
101132- static atomic_t acqseq;
101133+ static atomic_unchecked_t acqseq;
101134
101135 do {
101136- res = atomic_inc_return(&acqseq);
101137+ res = atomic_inc_return_unchecked(&acqseq);
101138 } while (!res);
101139 return res;
101140 }
101141diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
101142index 0b44d85..1a7f88b 100644
101143--- a/net/l2tp/l2tp_ip.c
101144+++ b/net/l2tp/l2tp_ip.c
101145@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
101146 if (flags & MSG_OOB)
101147 goto out;
101148
101149+ if (addr_len)
101150+ *addr_len = sizeof(*sin);
101151+
101152 skb = skb_recv_datagram(sk, flags, noblock, &err);
101153 if (!skb)
101154 goto out;
101155@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
101156 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
101157 sin->sin_port = 0;
101158 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
101159- *addr_len = sizeof(*sin);
101160 }
101161 if (inet->cmsg_flags)
101162 ip_cmsg_recv(msg, skb);
101163diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
101164index 1a3c7e0..80f8b0c 100644
101165--- a/net/llc/llc_proc.c
101166+++ b/net/llc/llc_proc.c
101167@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
101168 int rc = -ENOMEM;
101169 struct proc_dir_entry *p;
101170
101171- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
101172+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
101173 if (!llc_proc_dir)
101174 goto out;
101175
101176diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101177index 453e974..b3a43a5 100644
101178--- a/net/mac80211/cfg.c
101179+++ b/net/mac80211/cfg.c
101180@@ -839,7 +839,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
101181 ret = ieee80211_vif_use_channel(sdata, chandef,
101182 IEEE80211_CHANCTX_EXCLUSIVE);
101183 }
101184- } else if (local->open_count == local->monitors) {
101185+ } else if (local_read(&local->open_count) == local->monitors) {
101186 local->_oper_chandef = *chandef;
101187 ieee80211_hw_config(local, 0);
101188 }
101189@@ -3356,7 +3356,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
101190 else
101191 local->probe_req_reg--;
101192
101193- if (!local->open_count)
101194+ if (!local_read(&local->open_count))
101195 break;
101196
101197 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
101198@@ -3819,8 +3819,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
101199 if (chanctx_conf) {
101200 *chandef = chanctx_conf->def;
101201 ret = 0;
101202- } else if (local->open_count > 0 &&
101203- local->open_count == local->monitors &&
101204+ } else if (local_read(&local->open_count) > 0 &&
101205+ local_read(&local->open_count) == local->monitors &&
101206 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
101207 if (local->use_chanctx)
101208 *chandef = local->monitor_chandef;
101209diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101210index b127902..9dc4947 100644
101211--- a/net/mac80211/ieee80211_i.h
101212+++ b/net/mac80211/ieee80211_i.h
101213@@ -28,6 +28,7 @@
101214 #include <net/ieee80211_radiotap.h>
101215 #include <net/cfg80211.h>
101216 #include <net/mac80211.h>
101217+#include <asm/local.h>
101218 #include "key.h"
101219 #include "sta_info.h"
101220 #include "debug.h"
101221@@ -995,7 +996,7 @@ struct ieee80211_local {
101222 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101223 spinlock_t queue_stop_reason_lock;
101224
101225- int open_count;
101226+ local_t open_count;
101227 int monitors, cooked_mntrs;
101228 /* number of interfaces with corresponding FIF_ flags */
101229 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
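open_count moves from a plain int to local_t, the <asm/local.h> type backed by atomic_long_t; its local_read()/local_inc()/local_dec() helpers give cheap atomic updates and, under this patch, sit outside the REFCOUNT plugin's instrumentation. Every comparison site in iface.c, main.c, pm.c, rate.c and util.c below is converted to match. The API in brief:

	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	static void on_open(void)  { local_inc(&open_count); }
	static void on_close(void) { local_dec(&open_count); }

	static bool device_idle(void)
	{
		return local_read(&open_count) == 0;
	}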
101230diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101231index ce1c443..6cd39e1 100644
101232--- a/net/mac80211/iface.c
101233+++ b/net/mac80211/iface.c
101234@@ -529,7 +529,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
101235 break;
101236 }
101237
101238- if (local->open_count == 0) {
101239+ if (local_read(&local->open_count) == 0) {
101240 res = drv_start(local);
101241 if (res)
101242 goto err_del_bss;
101243@@ -576,7 +576,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
101244 res = drv_add_interface(local, sdata);
101245 if (res)
101246 goto err_stop;
101247- } else if (local->monitors == 0 && local->open_count == 0) {
101248+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
101249 res = ieee80211_add_virtual_monitor(local);
101250 if (res)
101251 goto err_stop;
101252@@ -685,7 +685,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
101253 atomic_inc(&local->iff_promiscs);
101254
101255 if (coming_up)
101256- local->open_count++;
101257+ local_inc(&local->open_count);
101258
101259 if (hw_reconf_flags)
101260 ieee80211_hw_config(local, hw_reconf_flags);
101261@@ -723,7 +723,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
101262 err_del_interface:
101263 drv_remove_interface(local, sdata);
101264 err_stop:
101265- if (!local->open_count)
101266+ if (!local_read(&local->open_count))
101267 drv_stop(local);
101268 err_del_bss:
101269 sdata->bss = NULL;
101270@@ -874,7 +874,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
101271 }
101272
101273 if (going_down)
101274- local->open_count--;
101275+ local_dec(&local->open_count);
101276
101277 switch (sdata->vif.type) {
101278 case NL80211_IFTYPE_AP_VLAN:
101279@@ -933,7 +933,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
101280 }
101281 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
101282
101283- if (local->open_count == 0)
101284+ if (local_read(&local->open_count) == 0)
101285 ieee80211_clear_tx_pending(local);
101286
101287 /*
101288@@ -973,7 +973,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
101289
101290 ieee80211_recalc_ps(local, -1);
101291
101292- if (local->open_count == 0) {
101293+ if (local_read(&local->open_count) == 0) {
101294 ieee80211_stop_device(local);
101295
101296 /* no reconfiguring after stop! */
101297@@ -984,7 +984,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
101298 ieee80211_configure_filter(local);
101299 ieee80211_hw_config(local, hw_reconf_flags);
101300
101301- if (local->monitors == local->open_count)
101302+ if (local->monitors == local_read(&local->open_count))
101303 ieee80211_add_virtual_monitor(local);
101304 }
101305
101306diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101307index c7a7a86..a74f57b 100644
101308--- a/net/mac80211/main.c
101309+++ b/net/mac80211/main.c
101310@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101311 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
101312 IEEE80211_CONF_CHANGE_POWER);
101313
101314- if (changed && local->open_count) {
101315+ if (changed && local_read(&local->open_count)) {
101316 ret = drv_config(local, changed);
101317 /*
101318 * Goal:
101319diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101320index d478b88..8c8d157 100644
101321--- a/net/mac80211/pm.c
101322+++ b/net/mac80211/pm.c
101323@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
101324 struct ieee80211_sub_if_data *sdata;
101325 struct sta_info *sta;
101326
101327- if (!local->open_count)
101328+ if (!local_read(&local->open_count))
101329 goto suspend;
101330
101331 ieee80211_scan_cancel(local);
101332@@ -58,7 +58,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
101333 cancel_work_sync(&local->dynamic_ps_enable_work);
101334 del_timer_sync(&local->dynamic_ps_timer);
101335
101336- local->wowlan = wowlan && local->open_count;
101337+ local->wowlan = wowlan && local_read(&local->open_count);
101338 if (local->wowlan) {
101339 int err = drv_suspend(local, wowlan);
101340 if (err < 0) {
101341@@ -123,7 +123,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
101342 WARN_ON(!list_empty(&local->chanctx_list));
101343
101344 /* stop hardware - this must stop RX */
101345- if (local->open_count)
101346+ if (local_read(&local->open_count))
101347 ieee80211_stop_device(local);
101348
101349 suspend:
101350diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101351index 22b223f..ab70070 100644
101352--- a/net/mac80211/rate.c
101353+++ b/net/mac80211/rate.c
101354@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101355
101356 ASSERT_RTNL();
101357
101358- if (local->open_count)
101359+ if (local_read(&local->open_count))
101360 return -EBUSY;
101361
101362 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
101363diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
101364index 6ff1346..936ca9a 100644
101365--- a/net/mac80211/rc80211_pid_debugfs.c
101366+++ b/net/mac80211/rc80211_pid_debugfs.c
101367@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
101368
101369 spin_unlock_irqrestore(&events->lock, status);
101370
101371- if (copy_to_user(buf, pb, p))
101372+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
101373 return -EFAULT;
101374
101375 return p;
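
The rc80211_pid_debugfs change above is a bounds check rather than a type change: p counts the bytes assembled in the on-stack buffer pb, and rejecting p > sizeof(pb) before the copy guarantees copy_to_user() can never read past the stack buffer and leak adjacent kernel stack contents to userspace. The same pattern in isolation (sketch; fill_events() is a hypothetical producer standing in for the real event-formatting code):

    static ssize_t sketch_read(char __user *buf)
    {
            char pb[64];
            size_t p = fill_events(pb);     /* hypothetical: returns bytes written */

            /* validate the length before it ever reaches copy_to_user() */
            if (p > sizeof(pb) || copy_to_user(buf, pb, p))
                    return -EFAULT;
            return p;
    }
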
101376diff --git a/net/mac80211/util.c b/net/mac80211/util.c
101377index 6427625..afa5a5a 100644
101378--- a/net/mac80211/util.c
101379+++ b/net/mac80211/util.c
101380@@ -1483,7 +1483,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101381 }
101382 #endif
101383 /* everything else happens only if HW was up & running */
101384- if (!local->open_count)
101385+ if (!local_read(&local->open_count))
101386 goto wake_up;
101387
101388 /*
101389@@ -1708,7 +1708,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101390 local->in_reconfig = false;
101391 barrier();
101392
101393- if (local->monitors == local->open_count && local->monitors > 0)
101394+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
101395 ieee80211_add_virtual_monitor(local);
101396
101397 /*
101398diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
101399index e9410d1..77b6378 100644
101400--- a/net/netfilter/Kconfig
101401+++ b/net/netfilter/Kconfig
101402@@ -1081,6 +1081,16 @@ config NETFILTER_XT_MATCH_ESP
101403
101404 To compile it as a module, choose M here. If unsure, say N.
101405
101406+config NETFILTER_XT_MATCH_GRADM
101407+ tristate '"gradm" match support'
101408+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
101409+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
101410+ ---help---
101411+ The gradm match allows matching on whether grsecurity RBAC is
101412+ enabled. It is useful when iptables rules are applied early on
101413+ bootup to prevent connections to the machine (except from a
101414+ trusted host) while the RBAC system is still disabled.
101415+
101416 config NETFILTER_XT_MATCH_HASHLIMIT
101417 tristate '"hashlimit" match support'
101418 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
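
The new Kconfig entry is backed by net/netfilter/xt_gradm.c, added at the end of this patch (the file body is truncated in this scrape). The match itself is tiny: it reports whether the RBAC system is currently enabled, with an invert flag set by the userspace extension. A hedged sketch of the core predicate; gr_acl_is_enabled() is the real grsecurity helper, but treat the matchinfo layout as illustrative:

    struct xt_gradm_mtinfo {
            __u16 flags;
            __u16 invflags;                 /* nonzero for inverted rules */
    };

    static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
    {
            const struct xt_gradm_mtinfo *info = par->matchinfo;
            bool retval = false;

            if (gr_acl_is_enabled())
                    retval = true;
            return retval ^ info->invflags; /* invflags flips the verdict */
    }

As the help text suggests, the intended use is a boot-time rule along the lines of "iptables -A INPUT ! -s <trusted host> -m gradm --disabled -j DROP" (option name per the matching userspace extension; treat as illustrative), which stops matching as soon as gradm enables the RBAC system.
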
101419diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
101420index bffdad7..f9317d1 100644
101421--- a/net/netfilter/Makefile
101422+++ b/net/netfilter/Makefile
101423@@ -133,6 +133,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
101424 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
101425 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
101426 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
101427+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
101428 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
101429 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
101430 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
101431diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
101432index de770ec..3fc49d2 100644
101433--- a/net/netfilter/ipset/ip_set_core.c
101434+++ b/net/netfilter/ipset/ip_set_core.c
101435@@ -1922,7 +1922,7 @@ done:
101436 return ret;
101437 }
101438
101439-static struct nf_sockopt_ops so_set __read_mostly = {
101440+static struct nf_sockopt_ops so_set = {
101441 .pf = PF_INET,
101442 .get_optmin = SO_IP_SET,
101443 .get_optmax = SO_IP_SET + 1,
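
Dropping __read_mostly from so_set (and from ip_vs_genl_ops further down) is constification fallout: under this patch's constify gcc plugin such ops structures become implicitly const, and gcc refuses to place const and non-const objects in the same explicitly named section. A tiny user-space reproduction of the error these removals avoid:

    #define __read_mostly __attribute__((__section__(".data..read_mostly")))

    static int counter __read_mostly;            /* writable object in the section */
    static const int limit __read_mostly = 100;  /* gcc: "causes a section type
                                                    conflict" with counter */

With the attribute gone, the now-const structure simply lands in a read-only section instead.
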
101444diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
101445index a8eb0a8..86f2de4 100644
101446--- a/net/netfilter/ipvs/ip_vs_conn.c
101447+++ b/net/netfilter/ipvs/ip_vs_conn.c
101448@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
101449 /* Increase the refcnt counter of the dest */
101450 ip_vs_dest_hold(dest);
101451
101452- conn_flags = atomic_read(&dest->conn_flags);
101453+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
101454 if (cp->protocol != IPPROTO_UDP)
101455 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
101456 flags = cp->flags;
101457@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
101458
101459 cp->control = NULL;
101460 atomic_set(&cp->n_control, 0);
101461- atomic_set(&cp->in_pkts, 0);
101462+ atomic_set_unchecked(&cp->in_pkts, 0);
101463
101464 cp->packet_xmit = NULL;
101465 cp->app = NULL;
101466@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
101467
101468 /* Don't drop the entry if its number of incoming packets is not
101469 located in [0, 8] */
101470- i = atomic_read(&cp->in_pkts);
101471+ i = atomic_read_unchecked(&cp->in_pkts);
101472 if (i > 8 || i < 0) return 0;
101473
101474 if (!todrop_rate[i]) return 0;
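
The atomic_*_unchecked() conversions in this and the remaining IPVS and nfnetlink_log hunks are PaX REFCOUNT fallout: plain atomic_t now traps on overflow to catch reference-count wraps, which would be a false positive for pure statistics such as in_pkts, for the global_seq sequence number, and for the conn_flags bitmask holder. Those are switched to atomic_unchecked_t, which keeps the old non-trapping semantics. A simplified sketch of the type and the accessors used here (the real definitions are per-arch, added earlier in this patch):

    typedef struct {
            int counter;
    } atomic_unchecked_t;   /* same layout as atomic_t, no overflow trap */

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return (*(volatile const int *)&v->counter);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            v->counter = i;
    }

    static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
    {
            /* generic fallback; the arch versions use the native atomic insns */
            return __sync_add_and_fetch(&v->counter, i);
    }
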
101475diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
101476index 4f26ee4..6a9d7c3 100644
101477--- a/net/netfilter/ipvs/ip_vs_core.c
101478+++ b/net/netfilter/ipvs/ip_vs_core.c
101479@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
101480 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
101481 /* do not touch skb anymore */
101482
101483- atomic_inc(&cp->in_pkts);
101484+ atomic_inc_unchecked(&cp->in_pkts);
101485 ip_vs_conn_put(cp);
101486 return ret;
101487 }
101488@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
101489 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
101490 pkts = sysctl_sync_threshold(ipvs);
101491 else
101492- pkts = atomic_add_return(1, &cp->in_pkts);
101493+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
101494
101495 if (ipvs->sync_state & IP_VS_STATE_MASTER)
101496 ip_vs_sync_conn(net, cp, pkts);
101497diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
101498index 35be035..50f8834 100644
101499--- a/net/netfilter/ipvs/ip_vs_ctl.c
101500+++ b/net/netfilter/ipvs/ip_vs_ctl.c
101501@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
101502 */
101503 ip_vs_rs_hash(ipvs, dest);
101504 }
101505- atomic_set(&dest->conn_flags, conn_flags);
101506+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
101507
101508 /* bind the service */
101509 old_svc = rcu_dereference_protected(dest->svc, 1);
101510@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
101511 * align with netns init in ip_vs_control_net_init()
101512 */
101513
101514-static struct ctl_table vs_vars[] = {
101515+static ctl_table_no_const vs_vars[] __read_only = {
101516 {
101517 .procname = "amemthresh",
101518 .maxlen = sizeof(int),
101519@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101520 " %-7s %-6d %-10d %-10d\n",
101521 &dest->addr.in6,
101522 ntohs(dest->port),
101523- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101524+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101525 atomic_read(&dest->weight),
101526 atomic_read(&dest->activeconns),
101527 atomic_read(&dest->inactconns));
101528@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101529 "%-7s %-6d %-10d %-10d\n",
101530 ntohl(dest->addr.ip),
101531 ntohs(dest->port),
101532- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101533+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101534 atomic_read(&dest->weight),
101535 atomic_read(&dest->activeconns),
101536 atomic_read(&dest->inactconns));
101537@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
101538
101539 entry.addr = dest->addr.ip;
101540 entry.port = dest->port;
101541- entry.conn_flags = atomic_read(&dest->conn_flags);
101542+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
101543 entry.weight = atomic_read(&dest->weight);
101544 entry.u_threshold = dest->u_threshold;
101545 entry.l_threshold = dest->l_threshold;
101546@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
101547 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
101548 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
101549 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
101550- (atomic_read(&dest->conn_flags) &
101551+ (atomic_read_unchecked(&dest->conn_flags) &
101552 IP_VS_CONN_F_FWD_MASK)) ||
101553 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
101554 atomic_read(&dest->weight)) ||
101555@@ -3580,7 +3580,7 @@ out:
101556 }
101557
101558
101559-static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
101560+static const struct genl_ops ip_vs_genl_ops[] = {
101561 {
101562 .cmd = IPVS_CMD_NEW_SERVICE,
101563 .flags = GENL_ADMIN_PERM,
101564@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
101565 {
101566 int idx;
101567 struct netns_ipvs *ipvs = net_ipvs(net);
101568- struct ctl_table *tbl;
101569+ ctl_table_no_const *tbl;
101570
101571 atomic_set(&ipvs->dropentry, 0);
101572 spin_lock_init(&ipvs->dropentry_lock);
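
ctl_table_no_const, used in the sysctl hunks in this file, the lblc/lblcr schedulers, and the conntrack files below, is the escape hatch for constified sysctl tables: tables that must be written at runtime (kmemdup()'d per-netns copies, or entries whose .data is filled in later) get a typedef that opts out of the constify plugin, roughly:

    /* from the include/linux/sysctl.h hunk earlier in this patch */
    typedef struct ctl_table __no_const ctl_table_no_const;

    /* typical per-netns use: duplicate the template, then edit the copy */
    static int sketch_init_sysctl(struct net *net)
    {
            ctl_table_no_const *table;

            table = kmemdup(template_table, sizeof(template_table), GFP_KERNEL);
            if (table == NULL)
                    return -ENOMEM;
            table[0].data = &net_private_value;  /* hypothetical per-netns target */
            return 0;
    }

Static tables such as vs_vars[] additionally carry __read_only, so PaX keeps their page non-writable except around deliberate updates.
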
101573diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
101574index ca056a3..9cf01ef 100644
101575--- a/net/netfilter/ipvs/ip_vs_lblc.c
101576+++ b/net/netfilter/ipvs/ip_vs_lblc.c
101577@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
101578 * IPVS LBLC sysctl table
101579 */
101580 #ifdef CONFIG_SYSCTL
101581-static struct ctl_table vs_vars_table[] = {
101582+static ctl_table_no_const vs_vars_table[] __read_only = {
101583 {
101584 .procname = "lblc_expiration",
101585 .data = NULL,
101586diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
101587index 3f21a2f..a112e85 100644
101588--- a/net/netfilter/ipvs/ip_vs_lblcr.c
101589+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
101590@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
101591 * IPVS LBLCR sysctl table
101592 */
101593
101594-static struct ctl_table vs_vars_table[] = {
101595+static ctl_table_no_const vs_vars_table[] __read_only = {
101596 {
101597 .procname = "lblcr_expiration",
101598 .data = NULL,
101599diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
101600index db80126..ef7110e 100644
101601--- a/net/netfilter/ipvs/ip_vs_sync.c
101602+++ b/net/netfilter/ipvs/ip_vs_sync.c
101603@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
101604 cp = cp->control;
101605 if (cp) {
101606 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
101607- pkts = atomic_add_return(1, &cp->in_pkts);
101608+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
101609 else
101610 pkts = sysctl_sync_threshold(ipvs);
101611 ip_vs_sync_conn(net, cp->control, pkts);
101612@@ -771,7 +771,7 @@ control:
101613 if (!cp)
101614 return;
101615 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
101616- pkts = atomic_add_return(1, &cp->in_pkts);
101617+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
101618 else
101619 pkts = sysctl_sync_threshold(ipvs);
101620 goto sloop;
101621@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
101622
101623 if (opt)
101624 memcpy(&cp->in_seq, opt, sizeof(*opt));
101625- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
101626+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
101627 cp->state = state;
101628 cp->old_state = cp->state;
101629 /*
101630diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
101631index c47444e..b0961c6 100644
101632--- a/net/netfilter/ipvs/ip_vs_xmit.c
101633+++ b/net/netfilter/ipvs/ip_vs_xmit.c
101634@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
101635 else
101636 rc = NF_ACCEPT;
101637 /* do not touch skb anymore */
101638- atomic_inc(&cp->in_pkts);
101639+ atomic_inc_unchecked(&cp->in_pkts);
101640 goto out;
101641 }
101642
101643@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
101644 else
101645 rc = NF_ACCEPT;
101646 /* do not touch skb anymore */
101647- atomic_inc(&cp->in_pkts);
101648+ atomic_inc_unchecked(&cp->in_pkts);
101649 goto out;
101650 }
101651
101652diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
101653index a4b5e2a..13b1de3 100644
101654--- a/net/netfilter/nf_conntrack_acct.c
101655+++ b/net/netfilter/nf_conntrack_acct.c
101656@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
101657 #ifdef CONFIG_SYSCTL
101658 static int nf_conntrack_acct_init_sysctl(struct net *net)
101659 {
101660- struct ctl_table *table;
101661+ ctl_table_no_const *table;
101662
101663 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
101664 GFP_KERNEL);
101665diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
101666index 356bef5..99932cb 100644
101667--- a/net/netfilter/nf_conntrack_core.c
101668+++ b/net/netfilter/nf_conntrack_core.c
101669@@ -1627,6 +1627,10 @@ void nf_conntrack_init_end(void)
101670 #define DYING_NULLS_VAL ((1<<30)+1)
101671 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
101672
101673+#ifdef CONFIG_GRKERNSEC_HIDESYM
101674+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
101675+#endif
101676+
101677 int nf_conntrack_init_net(struct net *net)
101678 {
101679 int ret;
101680@@ -1641,7 +1645,11 @@ int nf_conntrack_init_net(struct net *net)
101681 goto err_stat;
101682 }
101683
101684+#ifdef CONFIG_GRKERNSEC_HIDESYM
101685+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
101686+#else
101687 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
101688+#endif
101689 if (!net->ct.slabname) {
101690 ret = -ENOMEM;
101691 goto err_slabname;
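
The HIDESYM hunk closes a small address leak: slab cache names are visible in /proc/slabinfo, and naming the per-netns conntrack cache "nf_conntrack_%p" embedded the kernel address of the struct net in that name. A monotonic counter keeps the names unique while revealing only how many netns caches have been created. The two variants side by side (sketch):

    static atomic_unchecked_t cache_id = ATOMIC_INIT(0);

    static char *sketch_cache_name(struct net *net)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
            /* unique per call, no pointer value disclosed; the cast keeps
             * the %08lx format honest */
            return kasprintf(GFP_KERNEL, "nf_conntrack_%08lx",
                             (unsigned long)atomic_inc_return_unchecked(&cache_id));
    #else
            /* embeds a kernel pointer in a user-visible name */
            return kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
    #endif
    }
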
101692diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
101693index 1df1761..ce8b88a 100644
101694--- a/net/netfilter/nf_conntrack_ecache.c
101695+++ b/net/netfilter/nf_conntrack_ecache.c
101696@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
101697 #ifdef CONFIG_SYSCTL
101698 static int nf_conntrack_event_init_sysctl(struct net *net)
101699 {
101700- struct ctl_table *table;
101701+ ctl_table_no_const *table;
101702
101703 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
101704 GFP_KERNEL);
101705diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
101706index 974a2a4..52cc6ff 100644
101707--- a/net/netfilter/nf_conntrack_helper.c
101708+++ b/net/netfilter/nf_conntrack_helper.c
101709@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
101710
101711 static int nf_conntrack_helper_init_sysctl(struct net *net)
101712 {
101713- struct ctl_table *table;
101714+ ctl_table_no_const *table;
101715
101716 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
101717 GFP_KERNEL);
101718diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
101719index b65d586..beec902 100644
101720--- a/net/netfilter/nf_conntrack_proto.c
101721+++ b/net/netfilter/nf_conntrack_proto.c
101722@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
101723
101724 static void
101725 nf_ct_unregister_sysctl(struct ctl_table_header **header,
101726- struct ctl_table **table,
101727+ ctl_table_no_const **table,
101728 unsigned int users)
101729 {
101730 if (users > 0)
101731diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
101732index f641751..d3c5b51 100644
101733--- a/net/netfilter/nf_conntrack_standalone.c
101734+++ b/net/netfilter/nf_conntrack_standalone.c
101735@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
101736
101737 static int nf_conntrack_standalone_init_sysctl(struct net *net)
101738 {
101739- struct ctl_table *table;
101740+ ctl_table_no_const *table;
101741
101742 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
101743 GFP_KERNEL);
101744diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
101745index 7a394df..bd91a8a 100644
101746--- a/net/netfilter/nf_conntrack_timestamp.c
101747+++ b/net/netfilter/nf_conntrack_timestamp.c
101748@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
101749 #ifdef CONFIG_SYSCTL
101750 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
101751 {
101752- struct ctl_table *table;
101753+ ctl_table_no_const *table;
101754
101755 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
101756 GFP_KERNEL);
101757diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
101758index 85296d4..8becdec 100644
101759--- a/net/netfilter/nf_log.c
101760+++ b/net/netfilter/nf_log.c
101761@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
101762
101763 #ifdef CONFIG_SYSCTL
101764 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
101765-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
101766+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
101767
101768 static int nf_log_proc_dostring(struct ctl_table *table, int write,
101769 void __user *buffer, size_t *lenp, loff_t *ppos)
101770@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
101771 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
101772 mutex_unlock(&nf_log_mutex);
101773 } else {
101774+ ctl_table_no_const nf_log_table = *table;
101775+
101776 mutex_lock(&nf_log_mutex);
101777 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
101778 lockdep_is_held(&nf_log_mutex));
101779 if (!logger)
101780- table->data = "NONE";
101781+ nf_log_table.data = "NONE";
101782 else
101783- table->data = logger->name;
101784- r = proc_dostring(table, write, buffer, lenp, ppos);
101785+ nf_log_table.data = logger->name;
101786+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
101787 mutex_unlock(&nf_log_mutex);
101788 }
101789
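
The nf_log_proc_dostring() rework is the standard pattern for constified sysctl tables: the table entry now lives in read-only memory, so instead of temporarily pointing table->data at the logger name, the handler copies the entry to the stack, edits the copy, and hands the copy to proc_dostring(). The shape of the pattern (sketch; current_name is a hypothetical runtime-chosen string):

    static int sketch_proc_dostring(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            ctl_table_no_const tmp = *table;   /* writable stack copy of the RO entry */

            tmp.data = current_name;
            return proc_dostring(&tmp, write, buffer, lenp, ppos);
    }
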
101790diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
101791index f042ae5..30ea486 100644
101792--- a/net/netfilter/nf_sockopt.c
101793+++ b/net/netfilter/nf_sockopt.c
101794@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
101795 }
101796 }
101797
101798- list_add(&reg->list, &nf_sockopts);
101799+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
101800 out:
101801 mutex_unlock(&nf_sockopt_mutex);
101802 return ret;
101803@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
101804 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
101805 {
101806 mutex_lock(&nf_sockopt_mutex);
101807- list_del(&reg->list);
101808+ pax_list_del((struct list_head *)&reg->list);
101809 mutex_unlock(&nf_sockopt_mutex);
101810 }
101811 EXPORT_SYMBOL(nf_unregister_sockopt);
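
nf_sockopt_ops is constified, so the list_head embedded in each registered ops structure lives in read-only memory and plain list_add()/list_del() would fault on it. The pax_list_* helpers added by this patch open a brief write window around the pointer updates; simplified:

    void pax_list_add(struct list_head *new, struct list_head *head)
    {
            pax_open_kernel();      /* temporarily allow writes to RO data */
            __list_add(new, head, head->next);
            pax_close_kernel();
    }

    void pax_list_del(struct list_head *entry)
    {
            pax_open_kernel();
            __list_del(entry->prev, entry->next);
            entry->next = LIST_POISON1;
            entry->prev = LIST_POISON2;
            pax_close_kernel();
    }

The (struct list_head *) casts in the hunk exist because reg->list is const in the constified view while the helpers take non-const pointers.
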
101812diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
101813index c68e5e0..8d52d50 100644
101814--- a/net/netfilter/nf_tables_api.c
101815+++ b/net/netfilter/nf_tables_api.c
101816@@ -152,8 +152,8 @@ nf_tables_chain_type_lookup(const struct nft_af_info *afi,
101817 #ifdef CONFIG_MODULES
101818 if (autoload) {
101819 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
101820- request_module("nft-chain-%u-%*.s", afi->family,
101821- nla_len(nla)-1, (const char *)nla_data(nla));
101822+ request_module("nft-chain-%u-%.*s", afi->family,
101823+ nla_len(nla), (const char *)nla_data(nla));
101824 nfnl_lock(NFNL_SUBSYS_NFTABLES);
101825 type = __nf_tables_chain_type_lookup(afi->family, nla);
101826 if (type != NULL)
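
The request_module() change fixes a plain format-string bug, independent of any hardening: "%*.s" consumes the length argument as field width and then prints the string with an empty (hence zero) precision, so the chain type name never made it into the requested module alias. "%.*s" takes the length as precision and prints at most that many bytes of the not-necessarily-NUL-terminated netlink attribute, which is why the "-1" adjustment can be dropped at the same time. A user-space demonstration (compiles with any C compiler):

    #include <stdio.h>

    int main(void)
    {
            const char name[] = "filter";
            int len = 6;

            printf("[%*.s]\n", len, name);  /* "[      ]" - width 6, precision 0 */
            printf("[%.*s]\n", len, name);  /* "[filter]" - precision 6 */
            return 0;
    }
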
101827diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
101828index a155d19..726b0f2 100644
101829--- a/net/netfilter/nfnetlink_log.c
101830+++ b/net/netfilter/nfnetlink_log.c
101831@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
101832 struct nfnl_log_net {
101833 spinlock_t instances_lock;
101834 struct hlist_head instance_table[INSTANCE_BUCKETS];
101835- atomic_t global_seq;
101836+ atomic_unchecked_t global_seq;
101837 };
101838
101839 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
101840@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log,
101841 /* global sequence number */
101842 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
101843 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
101844- htonl(atomic_inc_return(&log->global_seq))))
101845+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
101846 goto nla_put_failure;
101847
101848 if (data_len) {
101849diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
101850index 82cb823..5685dd5 100644
101851--- a/net/netfilter/nft_compat.c
101852+++ b/net/netfilter/nft_compat.c
101853@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
101854 /* We want to reuse existing compat_to_user */
101855 old_fs = get_fs();
101856 set_fs(KERNEL_DS);
101857- t->compat_to_user(out, in);
101858+ t->compat_to_user((void __force_user *)out, in);
101859 set_fs(old_fs);
101860 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
101861 kfree(out);
101862@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
101863 /* We want to reuse existing compat_to_user */
101864 old_fs = get_fs();
101865 set_fs(KERNEL_DS);
101866- m->compat_to_user(out, in);
101867+ m->compat_to_user((void __force_user *)out, in);
101868 set_fs(old_fs);
101869 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
101870 kfree(out);
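
The two nft_compat hunks are annotation fixes rather than behaviour changes: compat_to_user() is declared to take a __user pointer, but here it is deliberately fed a kmalloc'd kernel buffer under a set_fs(KERNEL_DS) window, which makes the user-access checks accept kernel addresses. grsecurity's stricter address-space typing requires spelling that out, and __force_user (this patch's shorthand for sparse's __force __user) is the cast that documents it. The pattern in isolation (sketch):

    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);      /* user-access helpers now accept kernel pointers */
    /* 'out' is kernel memory; the cast records that passing it to a
     * __user-typed parameter is intentional and only valid in this window */
    t->compat_to_user((void __force_user *)out, in);
    set_fs(old_fs);
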
101871diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
101872new file mode 100644
101873index 0000000..c566332
101874--- /dev/null
101875+++ b/net/netfilter/xt_gradm.c
101876@@ -0,0 +1,51 @@
101877+/*
101878+ * gradm match for netfilter
101879